[Binary artifact: POSIX tar archive of the Zuul CI output directory `var/home/core/zuul-output/` (owner core:core), containing `var/home/core/zuul-output/logs/kubelet.log.gz`, a gzip-compressed kubelet log. The compressed payload is binary and not recoverable as text; only the archive paths and ownership metadata survive.]
,}6EXG]S<'x3 +Nr/TO6۟<{b?7g~oޞ#3g=;kuD+0N$XJޚO oyukr}wJ,$>(XJRlEg_Lͳ."/[͛lA~8e7"TSCxk -YO`qES^2kQ޾pk1?l, 5shcjO;-γlE9w ֝0<ΔW'P}ƾ“db+#"A[RgЍޗy%;mI,f,(3YHI+΁d3זZ+u( 6\6*0 JbhGs_3UWP٬??pJ-lel[M4-/7د f܀r,Z4iSV9l6B#b>Վ=wɜoJ=v鞫|19X\ f`ZLJJ-:I {TPY)4IMC{B,R愜DeQ[.ȬbN Oh"ƹ ᓷY{9.TWzS#7妕bf%Z FďWYNɫrv!)qzeǯJN?YreɘI,b@d2&!I1;#JEbt\D(8/-2RRfJ gb>AF0DFqb+8qt`kdkΠQ$^LQo#P`&HkՈV4+ā.^I51.efҭ)ȾE\xck/] Rߓ㹞+Kb&\{UQ->u΍ m Mħщut21I/-.gUY0E+;ig?:+9'%$=9ʹT* *а EJX0fnF<܌rs-݉ïl^ kwd.z 3%KNhc$r 8M(rne Ȣ`;X+\$J m ABs tw5w "%~jz}ȨGU#cg)Z׷=x|VUH॒py&DS$ GP/YOb/A(&HLZ)CB D+RF1 )p;XHvCd&"e!x0y՘IDk1\ 4MVHKD5rdK"haʜ7w]z*Cyk+octؘ 3u} of\Y{,]ҍ- D%L7&do[EպEqV=k;qM3-p0X_tT[K-<-T}uCބAƩ]gf&57MMji[sN/~y->eytsuݼ^. r瀞 g?]EۂyYyT0Ya#kƭBJzJ׮ty@J<%ݡ>RO1BE$Ǡkcsᙋx zNXR*# bhX)Ql=N;[B7zf֥L AcJzklޢ/ ~3Y>x=XIWV(x]STaj0UfzԀW bM>z@0h'rSO*qԁJ>S=S=ԁ0FP@x@ ƘcxxӋ_JE$"_hY"M/RK˫Oja"ZRZH /#+(3iTc R5|Ϻ7l`]°`>jBrÜQ [XcI"iC Pu ao׊0AQY Q4fZ)k.1109J(E)tHxډ׹#>YViEfDB } p*l?¸+8jG)ZqGJ-Xh F-wa'H9"Zj)SR8,P磥NG+.hVpJFã*j77M@#Mi6gU!mzl}/.=M&D|,\@rU$ SB  D<}}מ'O$ D9 &ng$݉;)%jO7lh<EӦhބ\>En5 G0/nj ),(4t4RΖ~Z9G! GRb er#'38znpdFIҨiiTi81z2 A&נm'S;2C Z,dٞ KK_L_?5Y)"j,2䜠8fT;t*H/V4LBDyټ5z{! M=RiXQ`Uby`3b紜%67ϣ(BGT ЃjKPmVCMTJj_aP&B`~D*-qȕG#*%Y\qŶ|R0~vq 5B,F.~fq0jzq0*9:,q XW;zqWCkL׉`XOa*c/A?: ĝ5}2l?x2/ " WQhaJ 6?tF77S}Omn=y0}߿=VQA)3ZX$S^oVfͮLԼm.0e\#@`h a"I {AD>xHT!끄XY1s1EԙY]tgCʻjpJU/u`\5=x9wxIAq7Aa. 4&T1"HJ'%>)UX4P˱K=nbw}`p3YjnWjc^YEQ/G61o[N~$<}^'{/DFSl\XƐҹ1Qqdu.:Ln7Zb(2w+󐽵Ҳ clK%#5r"@X9c*-&A5:FIl (WK 85`BL1jvfU6e _K8-h8zFΖ 7#FY Wi:WqC$hsM42w mTV!N`!"cNymI`8Q)-6x^lK̐msI`^ ׻`چu)@ &ḛ4N+ l (O;@2 {~, LQ'?zY66S&G0.b< dm6xNQaX,JXA8je 3?%ԙ۟hn#h$Pf4™{<F\!C>EH2nՈXA%`H%Vn4tT@8/?luf*IMg`'?X];%UY;󕹅H 4dnLV0 An!ai.v0k(mэq73ur<)OemKȣ},(6Jp+P $7R J1uf4 ׶ww: > =]Ϛ@04 9P& -,~ Zj>!tEp~8)t,p|~/8F}]̢:7&*ʾm~ `l|}3^HE']a-pM˦%2?nᬸ#$ /PJ$?uST͓~,uT>xw;?8gKѰ'\~u/uz3"`I/ϯLZ5\#]crYFЇQiI>M^wk&ǫ^`.z~wpV` +I{ Xtʫu&-O_oݑKqw^uTMq}W~`IxWwjY>oނxo񚡩b[ Q`qYS^1՘O߲QveY&Yc4$IerE S4/;Ӕ1[R Fi&a<6` `K nS&WftR({[kk 8J),Pqt$6e԰R(]RXtfR֑FVvHUK'Ms|%JD3a5X̤O*pxX@Ř| %(oodߕC #ĜammA37.W?I=`*=_0xf񷢔a8VSPQwx<e>`b#h{J%%\LI|(dWՙTfd&8(QMdVl'}Kj2L'`^H8rl[l)$da  aA:AONԚ vQzO)Ƞ))! .,}2'/*x#&AnIt >:ȌϞa]UhM$qۧ꓊*r8>$xYji(^QdB5[N5GױbXm\Z8V)$E,Ii I3 ؔ滲CDtN ۺ> ܁GrV0_14>߲vNU O/ZߥcCIK wm-}wzW+/Hl\/K)<``Qjܦ Zi3&7c|ѤϮy\]2ߏ%(ȿ}oxg_Z=?/$J $jݳ'#ڈm[#1MV'twkͿۖډ+ Oہii6R3/x3ͼ4*(E;M>=3_x;_%*P[0ڐl^!o ((ɠt)PBĶmn V󑖕DY% 8fN!m49:'05n&viыY,MnZk ZJ<(dc(Z8 E " l!33Vtk2 VERlPKϊs1Y$6Ʌ]{M/3U`<}cD"3,>3BNhIʦT6 /XsD)(`+mc2,/IRq&5O[ NBp8񧎌n\l6KEmqĀ.n,I:f)x[Ȏ?ϘɧZdV/S}D|[`C?ynq0{1!WѠGhCIBKSTRŒ)b$L20"p'AQaLG6lP>[8y o']dWևb?ZJ ]ew ! 
^x1%D7taH-7s I4S!fEaqFC mecڄ},1To%$%#b**Fh ð8>-z7D=̺ڏ}lnKa&s| HӀb.Q_?PN̿QLs 0fO]#}?׾uo K>_i2}}Š񯵛͔^&7t ;:&sR.P ] dII yNI+p9ER[$P@%} kydqQ{ G-_ev˥nW{^ȍ3tUDs{GbT9T}]Gw w ^6D^o}z\~pWFndg/s~gsq-΁Jx`VnۦC7_apAF&<6>G)L[nh?^$.#A^&KfyYH=-~ ΝO̍@*3^F nfbCknyK{-Zp ~.~9}~},/EC-Ϟ+oo~_4':3hYYx>xv;މz5|fܘ-_z']]Ƕ%3u o%6ye5*;# zb2Eƃ(ay5yyٓyyy%QFbGq}FJRI*Ǡ/F!=z0 \_Yb0J1K|H69!17aҺQ+qv;(xjiG\y'^>P/z[*_Z4Bk:RsJ1-j Jb L C-@CNK5pլ׽>7z}'2xk0|1!0 s"=V,0&R+)B,@, 1Leܶ'o5,7^iE'Zk,;j0%xm:_>fukݴ[͖Y8\8\JcG>ʇgvhᐤbj]PX&Gt B9y38$rtlr|T(bu AFZF|H0oSY1q:9IQbQ˪ w(R]?6ĐHj$5Q@Fy4鍆5ŔV^ 8X+~;7GoV{yu]gϬ #~/4`46-,  U' m`ռYT2=UzWj>Y{ƣ6K9^5Pcx_ B`G37IBTcHki/|xyӷ}~ϑvV!]*0Уe hj}F&\ \{~uR.;̯ ;BQ-p͌ gQ4chAC&r`%n nC:ؑ EABL=9]JzS{ylq wK<A'Ý)BrFF9%/_^v9ZL(28gIHR(6P'RB2rr,~5rv-|fj,cC[k[׺tQY_ޭj)6T(+zukb|IxشN&qM.pG UV2Xd1GEH%o:}ԅ ~]A QGMcAz*:k IzB 0Ro8 *-C uL-nx4tzKXwa ;EcF[0LF1!1 k@dg=7XM\ c_kk&j4˜KδsF2SHT$;i&0){eС,ZcM2wtXAڧ4{Fb'SRRJY8(k*8ƃ]R;Llox]<%4vz?H3w 7=YttxEp}nUq&s}CTbr#⇙1equweԒ]Jj*{w ⲟg<KA3䂝-x'W')f~m $ё&9̴YB+uƩ%O 秅p8mk~\hM=p]LD+zW+:?j’SNOchŽW=&(RfXc%紅ZjF[3ݛ /"k}vm{778%10guz]o8͆\Nopۼ;Tgnvv0pb`YŲBOO7<vV6Ϻxu{VA[:)粸2"h˭/']W^ F{Vn*j≿n.bOOo~|Sӿ~zw ܞ7@ΟhKcڵ$GXXqͫӏKmV_.0L?']}(vcrzz^X50+Y٩Aҋ[[7soA/kMM-V0b5z+k>yrw^-ǏK~J:OjMMcv߻d 3x'm4s.C&y&fL$}g(}m]V|uL^;E;jٰd!)1 8O8Ha'JףQ"S2-y3#zmY^2}v* ~Iru<߻eNR0Xd'.h8h C9]H@#1N:(':{' mk\ ܁Otpwφ8Y6mc AK2k醌ܔ>ȥ4]YlXې*=8eo%#-O^q=GWAԷAUKݗ9rZbۻ9{SWϴ#$l#OzߦEnQ{w_goB7Zv 3--rݳ {-9VuC}-Sol*l;Mi4- :E; peAYƪ eR,}g.QZA&Q`vZ7uggpyky$+I9Q2Z[S9PdBA,pSkѵs7fU~OT/!eԔZCfܺ~,|nv;DWW>^?O38Y`ҸK fdH)<6dΡɗǖ(Uϲ,ڳ^ӑ G*slS B>R"Rr&sNx6}7EVO_$YJ8}+fJ(TĎ\'A+Ri2yI&zLPu,Jf/%'@PdQe"g (M+HT 0+GV#gٸEZ,@nqqu,XdT,bTU2&@xr7_rb& ^B**PPi6ki $EgL m6}0eC)"m )]R! ֦,`* <ٜ74hK$9;&lR qƾXh*cX7YGNTmA{1$<ҩKQZsk]Zf,-ov"KZbę IBȦe`L&sYKt,[o-r#v XPwڶ2j; v͖9Br2 Zl5FҨ]`sčR[$&U B&$YQ!IHۨKYRJ,QWx%bOXhlZ/"ʈ:Dqc_R $+=3:ÓX<~Q %H圱n)g1"6FE$ d$gB J$"H4Fx2xʈXUI3cuV=qq^.JbkKDpNJcH@FBs)f!B.d9K+(iBpx,xX;CY<9-Mj2ŏS??vmĬ.!3U}67RVw44B$9{MYpi !. @Q긣 ѕP <IK=fE +Rm*y 9PUy-w6UV(7MDZ  ,9L  i%GoMd3}f]P8brsř)HzEI >EI'ɔeIiC B90`a$|"!va{N]e:Ru|[i+0p{PۡEלtȴ(=65S4Ed u:&'kX!NO'N\~{皕{(9ȹńT\x *!$8>x-㱈6sVtq+Y3űdU}To£Ǫ#",&뎡Tt\OE|,,$u.^q4N B<%} /ڟ J2_铷~2R_h|8 uDbRr#W\~q:}Xj)6RlɀS2M{,=ڻ [9k[,S^_,ş ?ACE- -ҵU:z;iIwe"Չ] ņ9]?׫ޯonW~kg,^joWKߙ^ zo~7T\}y^&ػ޺'=r$bsIhlT0!&xqpA=3^v ^?t#_*ڲDn . zSC0Jضܻc*r7o own=Zympuqx7.^#tQ;CpqVݺkW}١7߭~{ V&.a V$UpI V +4 V职~7kݨc?/>.{!I?\rYIUٴ71EĚt۳'_0'sײ+u.οfv/'./=_0+ AըskB S S觪?~KZW6YնkYmEZW&Ύ'Zm#~#~ۏtn?G; In?FH#~ۏtn? 
f?J:ׄ^Ǚ'w-mw~ok׻e\R^^OJW9):r,%~& _~=kן~u7U2G(3e`2Qf0 F(3e`$^8& h4?Hh4?͏Fh4?͏FUfo*\_\a"Z Bή{  Q )Z$d*΀({RנsN]ﻯ?T!{v2!iΩP<VZI[x)-<sHG4=?KiWXpm 9z- r}.'Yd_ ^Oj7(3V7.GVjoCy鏫?#W*t8J0S4:Lv]4B|" ~Q9cfs47Ig=j٤U(]8ѕ2dG|vhr*)P%'k@ww7qv#WǕlPtdžL>m܌u^%M6f/i x![}+!a$`_-lJ X[܅0>ƻBo?Y]͂4yXw{^]If֗{WGɜt,.sM-&X8[]rΗՂ Xb0>)pR V:sZS&QդBg &n ~gZYxݐWk_"y[EÜbYW@aaFramմ: ۧe{e2t6hY*q$"rKyc]xŻ6Wy\"W}%%@1 z'|1Y-&Jf_ A%%gm >ds9Cʠj.Y$Q1Zҙ)7%n f]`L*ۣMںoBMSl>=cH+.:a\,Z Eȿ6QY$Ň $LH'Z>&ڊ1;tNMU6䩡Fa:bC,m5WY}L̉vYM9ʆƅnB7DFcMEr+QG֮rMJE-%J\vqIlZ(Ԇy;&x[ rvF/!9 !j֔I&acGdcW%cE~:h{8pj-(TѴr=}^h&cL0x; S 5'Fɏ1AE;uc:n Kf9{6zeRہ !-1ŬܠEi<1EĚt۳jAS&HOU utP׷WKz X+aI$C)rֲ >c.bӵa5+dggjXV~5o4˹ӖAtcw۶w Geΐ"c*0sŌj2hB۩\7F@'-B<-0pm/Wk'{9Zh?Bt{fbc u61αӁUIt2cB^طbAV-ĊQT UPW]."QiCF0H9:8n(K&ZήfŖk#ca>D_DOpuSxYVfp7{S_U^yw%2ZsC\<A 8Uru6F0Sok+ϹƨZ2j@)M&s׊lrBkڄ( f<ξ/ܫ/3c oFS_i᛿a ŧٻdZk+ےdmr\D*)( Y!d_S,GɆsB6-f>@sG{s=#nm=C'rmNg ^"~p0ɺ[w9R"|Lvk31X@D䔄Ikt&8zO:%#̼-2cuvӒgE4}"_["3 %F3X9Uc2mjƒ0vӎCg.ǚ_5rg4U?ރeYq#@SAQf(7o۩`lR1 %NH)%U):`|{\o|5ZՕۯ3?ޏѢ!K&)Zb0)Fk}N<_[==v^V<8{ܝjQ'Ȳ6{&БIJZJ^( @H++1K@j:zw~)_I;[dc=RZ+%!kY0cBU9+e9i4zXirpOqX#eLb@ڲVP=Cj)YZV Wb#^H؎5 k W|q;vٙ: !=' 3-f%2 Dy|&5^h\pU=:rDE@8SNďfynN.ZT\\|Z _,\&'QWqS3eH?mq?_6r"TB3 ?G([ l&3)R!:=s04C}9_#pZf~Z}v/[Uv%J KqyX\<݊ yҞiR9nwmH_e1s Fq?3>QJsxֽy>f?ߪ'.gNj=91[ӳj^ׅ"dϗ2w'vo9!M=-ݞnhc7EFBpTb`żDOnK'@6J^gli m!3uoL>r篣ϯ v^>?7UaX~)K^*'^V{qtw'O}tTɧp 4h~[{&jiq˩eso}Reݳ:K^8 0+H?2yU?M2MpV߬k] k6X_~ U硄fJ@ӹ|aEct[xX4ݪ1T0Yb )kN\(f4FWEI|t vS8Х(wa]t$(v@HJ+!4)$q*Buo=.vZؙŮkkQZڴcw^F儛A &֫:aT39a_A8ɓyRdXQw[q+2] ϷRn䣝|L8}x>_?`}ל Ǒ?R;sy`n墤Lb^KEZY "gJg/?(u~. }Z7ܶڎߪIOCq|v5jQo˭W~k,:`Pʗ3'a_~U/Kp5Çozȍk^GW|Zoqύ3.hrt5,;T ajȔ,ęIZ@A;.Ov*e;WG8Qe?MlXr1 Lx*)1JE2Ŭ:c5kOI=vX3 .!r"8 rft mm"e5r<; H]Omi&kL/vOv/!㾟ͩ_n6ݗtd?]JU2:C#x!QP|k6%`0``r2 yt(9W a[iG᠆d}H'Hke (Ũ@dT:;P3,i9a~4d:ħ"rx5AMޟk([,MfiQwdPë Y #.V26y@$i-S'TY:FB<,t=4IAX$EGsӧ( >E@yΙb< OV׭d?S;89^DAsj"Qx I`#THI}kR;#V-dv] !+CzRԶew Ćy@dKR n+A1/qac!X b \IdqA#2x[0~Zb !2X4;|w[lp0/aN|v , 2ٖI7EG%BofɊwnbܿE~ne 3 [f0/6a=ߘ=&*tVzIG3|Ńy]epADue=s4j:n:IQ'D2.DH U:gFzJ K[';v_޽fK{-\P@H! D*| z-Qx(` %:+!g&F8n2tZT@gki(SAzcg71r=]r-a{~s&4վfj%LBr~zOF=Lk[p%$Un}L-]L%^!\U\!\er%jp4WWZU{WH.$Oqrp\^\-9'c0 doLp䴃Wrͥo(+JR.{obͯP+v{ :4U9U'Hrսdm24De-~J*71|.XHD 9DQH>%q*{Ax+u226>wUOhY~J}`|5/e|aPT,\>0ǁTfZ?|ɣOU*s‘ؒJ҃EHomR:r&vI6&"8kրwAb쳐S;c$:OZ#u5|MjVS.[ivm.gqЏ.x{//aowb_ߪofyqw-vkLB4v[V6T*t2Ɓ5' l,Z$+UYmyv|ל}XZKjzNOGV1HT4\9G`Tԉ&g F8֑}cm/"%yw|u I."MAFm 1fVHq洢VRψ1B\Ŕ#'PK$ E2>$ "R>rr( u NѾeY-4<cb8ʲ7hO6-? Z.?6t*N~uoy%)RC俿s2/Cǰe{ꧪ8C5s.8QrKp3"اp[LTHJ8,|J?"Ŝ(aH|cy=-y|rςнk5>@lƧT2^!: RS}(HD3I%M{,: %_)ۃ 4&؜I 8["I+ѷWh6*B/ " Ta^*@YA2FiΘJs˰Ɔh1Jb%֘6a⏯Eيikь}P!V\#&9uS>[3a(b\ lK̐nS_ " AgRɜRafXTqLa6B d 8qݏeA ƧM|n$D"M xqm#JXY"4(p qd_NH/P$==OD2{К@Q2 rJ*%Fjv#=Ƽ<IXMq&1F2Z>n5"VP ty)|kua_};@{FB) D~̎;]_[$AK*Ť +b&ٟc}J҆r^S:VR"auAޛE }yق@t`Ô֙bARP!S^IA) vO#`7[  $ քPK R .՗i%^{ϏWQ_eN|SnMLQWWp\*E:mQ^YX?pV\'J5c6=UԌB]KY(x1]W^΂%617Kr\댢YxT\C6vC!\)-fU7de7. 
} #F1^Mg8+{%h}GuշRaHsX>\jz<?ç8뗷(%k&~+una9}?^\ͯ/_uo//?~Ο`FJ$=KNj!t܄CKˋYj^ ]GLy)Kt_arT pU23 ?^~Fhk*E52ئ_'j̧n|BǁUzmCe01HHawdc]~AIzmAGi4E"Ҍ&.m@v,J1 :yfZAzLZ:̑6T(}FiRBZw9gΑbaoŗ5H_V$e}l"]wӏ$U 0iKaJLyY$d, \ Dl+&=pR8)MRw~G] yD1rDx1>`dc,b>2ˀyx.`8 -G GHkZD~@N]XB5 Vq㍨qcaA4G296?19CzrY "w7$﮷sԀV$Njor_AU!G ΍ X*Q#q$sp,kCDIh u}zV|o' +88FKDSV1f41pM [D@Pc1Cy< MjFU,cgO9k"HzPDY?<'DL~c' XpolClbC{v[pH`hQEMKv@D6`iWm a} ṯO7V@T/㨐Xb=Q9>Fv۲&鮑b@3UXMb-suT|>uSy a1^ꀜ5$ZCQu`F:T 79_g_Z/AjQAUq$UjTڼ7d 4uvj1Y et| tjQuX4,#&W//xƉ@ q>8/-2R(uJ gb #Xajjqb#8qxdlHxۑ3I ><$G*pg=kM2qik#h+7 t*?*c7jQ ąGϫG՟O~JFwܰ)U1ۄ;kHE>ג)E 0fP[絭[lfӤ=ɧ3,O!!WJQ;ĬW鐲]Djd)1sXsS M|gM=t}mȺ}:Kg`>4߆A.xrn~}?VDV;ǖy6$sw_I' j:-9F QB'TDjR@4!)Z7EmO4*X6I3%xA!+aMrv1;oMԷyGqi)ƚc*2Xh$z"KNhcFsL(QmGs+zdXD -J)i.c ٬0qc2!"hҀz)*͡ulw%"RY(ǂ9qq qo  #A5Sߛv!2Ǜo/K'L*T5iw=)x7:kNݬñU{>S&ܤ-} }<\!([JL.0"4o[p8 1X卒J)齥Q'5Fe#)'2mfO7Ѡ@0>a 6KDN+ ɧpqX)"j,2䜠8fT;t*H/V4LBD v6y+4BÊ@~Uf9;',|T@O2 e -# !f %bxW"H/?/4p'ew~Gj7j/u@ݨ37aL@%/,Yq͑3.[3'xtS%O"0L( Z:kY[#a ^ig}J&gZg嫯 u_m>ES,j_{YZ˔Ц-̙̦m!cW6m Jmr)Ѡߴ};̶ҟ Ge%}0u' h_X|ḫ8"b0; ˖h,Eipգ,ࢎ5b忏lP/ C4ݛNZZy2o :>,e;l.o@ĤIiBʑD$8"Lȵ2阄Eni[yuG߃Zkx@&13niml bo1z׃ֽɃoqmt!ugt!(hA]WЅ%WoGAk(Xuܴd|0cu[T;hU?6|](OOVd:[Kkf; wE ju-P>-jC0Y3pUE \j-u2 =\Alv`ɻ/ * pnG)0W#ؘWǑk_yQjJ!Q)n>ϼg7wNFi^|T5sVreE=~1dKVQtXƜ5 ݬ8{2~o\ϲBfQVO?| rJr+o,dG.ΕCڶygm!EWmֲ]׶V^۾Am+CsϋI? nXV=[J`7$5, :PF~|䬹5lhqfQ3J(K2^Q]{楾_`6Bw ӅZw KRp!CpEk u w =\A!LKB.UUN[+%KH{pUF \rMgA pJ v ؝ B]+𝇫B=\EB:۟9>j7U.6\FlwZ٣)83X%#WH*O?V)o`O6. O&`9bf^(s8r!&P.̥aF󼔋F&MdJc<(02 xPj("`1KgLnhLS< Ł)|ӎYuh#(URJ_";W1[<=!z+jlm755&ǵF+iOˊpT5- ]/6pGc%,}BSFL1@FG Fs5Tʼ ˆV{o$(H߽-}6'_2 ]acv+zw])ؕ72,/Ӳaх6)o~Nmpm-k|kuKFʶ^l6ğlumr*.cuj畺`kD^&Bt 6LDz,O1Ld XƵ9V;] ̬WPuҍ 6BRMEA\J Tg?n[n!'s{~_+R?^-Qޅtry5X٬L4sev~b^e$\&@PR\&CJ:sap 32q?gƹ죏9K^ǝcvX'_IôZޯ辛nwZ1Z!KbLEK#0r2*P E/V3=Wb";.6| t ,ly.MC!toL;/P5={7M]WrPXL0HƊ<[`,~m/C+@rJyVgQ}{Vhvw8ֻ0N>7zixݬ!22-Pγ:C34Vy;쵶9}W,/2şG5rjkQ*R0dvAqTs<1PF H,9CyWE-1I QG35Xj' BR @Т/@cip85eJu( u/}I<7^hLF0qp޳7DQ,-dĊX,EC֚f$&а``D{AjWɭ% ;ZDF4*e t)oZ"٥skRgWs)g4 _ߖ.h i:OG ȣ+!9JgE])tMʳ8wKąne~?eXmJ}y-qCA P1# ;B-%aM؏g{idٵF䀸+K\`,yJ5IɵhՂxE2 u,x|tX8)붹>?-f 8pqu_`2%͞WG# 9:eh/lp 7E u~?']8=6vip}fwK~uhX_w b,4MkbpGãͼJ{=K"b/g6ĻnQii5ojHkQfQ>4E3Xfhxt5=/^;`:u{WI]:-i.e`Ei(>vA=>4w0lN׼Z:ð@zj4qa8mZV0}q/ :.<8 O pܔNauOY;wl"ƛ- l0bm 6|5UDZCvFTaΗ}{}֒oQhwJ!;A$K3 /֑!cޥv,"?s@\(KV"p T,HLAiecgOv Ok+$uay`ţM,WRDmRD(8E@9X!4:Z`ϊɣx/g)i(- (O1Ƙѐ5.Ho`9ƨcyh׍qμF)7G/~[Ƶaq t(^;,Qρ( U!\fIz̤o&_~EA:A:$(J  E}p^ZdJa #\AF0DFUpb8:}M532lP|=!*3J-qwl)c6ҊrGe}RRsSbfNuԽYqSKDsɟ&w?Wg-5oCrꒄZ_pO=>&a;gϷgmT;w3.BS\å 7 / `~ TnvWO?~ J;qMJ %0/TvnYdω3uu{Ɖua:s \H@ фpo 6..pF6˷W'7E,PnC.w Шڗ_tw~jM>0ݪM όQ|/w~ZK]T ebLuo+p*p:"eVk7 )]1-p+:@jFvJÃ&̔ jR"7Mړ}3wf9FE^BfR0vY4!e)ޱakbOyα5L26u]_Bf >UWc(=hp*w^UOC#7z]$!}7L/aS*zj3Ls%j9-YFRZ ׊BIz)jKO}$ ```'U͔BՊ㕰_zq<)ZZq-l^ kwt P0c}(:.9YFoP1G EέL5""Kw9lVJHDusodghzNCAӆݚCZ:Y1%pW^Pn `X1(H2bZ/3K%>: k1MN'Ρ//Yh}Kv2J 8VJPA4 Q #CtHKǝB,RtX(2g eZjX$"﵌@.Fs+%"3&ΖAZe<!W|o n?ohO&<-^| Мcyg2*杝;&* ZZǺ$]}k| ń{~L R-ѝd;ȑacξv]>zC.U7ϔ\?Mnv?c~Џex'oboMʳسOLdI'E͢޶49'_w!QYVBtZ۝ g_@=Tak<լ㚑5V+$=;I'NɑtyB$]Iw(%S̢P1psUpr.&Vxhf\BM/VVimŻ'`V^%#Yy/ 0¹xA}+R#+y)z$%-H 1 ]<ϙOdxZG[3"mmu;"*>'T}‰HiG`:-u:Z)&͢ ΝSBhxa qy0F,B0#kDꥦlFLҙ+FQ4`pPLhOWR)X3BzY ͟:r6w㫶V٤3{D<^W>PAz ÔO7 DK(¤R$1Ơ5Z`(3)΄3ʀR FWtR&Utd]R!Pj1NOZvNItMVpvdYv܄,&XD5 ju:Ujw1|]A@UH՟?1Voπm5qXI&.^c8D,"6³Ԁ^.hIZV$ bFDT9 igQګk1|φJ&spЃG6=2~3[SL2ن;]xj dNyM /ڸߧn=ϵ%:gEno~o$ڮV|uA.tJ2]d:%NISd:sYxYN%NISd:%NISd:%NIS3F,t I/tJ2L$)tJ2L$9U*~IT)qJUm'W$JԺR}TKJޢ>q%/{b>j3ǟϰƦUuoh7.c)}5uz( -6{H1,Ǩ]<|ҹVqʦaxs76\ ay6^GWS7_,/WaƫQ7 UoBCA&^UOC#7z]}gk%za-!͂YG70cf|7&%L!9P3 1dgѹ0k%p@TUꗀ%~ _ꗀ_QDѢ8ZGD.KD.ah2/^%ۙ98@ၱXkTN`)$Ť](׉8}}e'l4  Qč㌓ĝ_H 1&eO{;7pdq8h<E&o>q̷0rMnPδT}Sr>RR h Ku__+nq]_27Ś9k˭#:F 
^ʁrȳV% ZGK!zƌƁZ0cW VJ;+G+p,_zM{fOdX!f_?f[ q$3edAB kC`TQSCw#ٻ6r$Ugic^d 60(ȒG$}+vKH-S<pnYbWXUG-urӠ;b߲U{R[0DdOLɫ`M3)cJ gFe#YSPҥmN9k {Ėvпo¸hL^o)p%$ s'rRÕ39C P'V8h eAmSl@$ͿE@ hQH4㒣@kML(8\FM(02[e? 0*{V"gm|qk=oUuÛ%EQ%BiКFkIQu={@r>NEt֜cd2&&NB\_d=(l*!*T Xs72Uaa1 ua, otTLf&y&pq%^v@~L<1b'24R/BΉG)sETIN\i.AVȜI2dcEF8MeJ%|l m;blb‚ O$hS\)rFl?%s_Pwlڦ0j; v͖eOQZ \0k JdT',g"xPirIZȨRVtk"E`&  1 /8$Q ڕ.R1Ʀ/"-C7 \$G4)0fQ:KT4\mh(!hEm IIiʣ,e#%"&i*Bp+(9H9|qq<Ob\%. ^pMoVm" ZSW:e}uzR)OFiv.pPL!/z <-c*J}qU{&1'k.}k]E \YJW !OsJb=,*V/%sCB^Ⱦe1essUʭng^K˿>wuݣp`yܾ\s}ZhHmmk*cOg%=͋y9L+(LK&v5?p<9!'78M= c sQ?Q<\p}?q*[j4KarPpxRy,yerɿีa~)xQB"4#[%7c]]Ų4uu)Q퓿Ovz^j>}Ҩ Uٝs>EV_4|qUO*{Bnт\]qy4+ʙ)Y}-?+K<_ Uғ@ ^]l۩ˆz&9HO͋]Cdkv *kp$>ZyDY{b+:Wʦ/=@\45u1иp{c89"0i5)>jKS$(F=a >ߣnK9ʔB[+5݇hKn>sy-7KOq]<^ى*Z#-Z@N, I!=u2$(T].K[:o0I\:L3 'Bk4% (.h.=TBƝC mz1*$`.a%<"ʀ3e`G̒#cd tT[ӽ6"1V 2bYe5E:&'5XІN 6߃kgJiǸǨT V'%+Q/]ɤwBgWg ?X0qrq&h O{غ[fТ' +6*H0D?m]e_y0ퟃ.;+ Ɔ'~D|ne?,qH ٝL͏dL+)n< ߿:O/oٽFp+\ \F@teV[ 7^ki$שX ]|s6 G᪺t V WdƋGkm 7hp18٫B(zF7:VSuFW|*muw47^M' X1[ov]^%GtҜ>wap"Y7ۼ=gX7 [;t4ˌ0```^żB]&l[!ͳοY}W]:g0O^y=/QI%!r #e,F#d`2f^NDXwS`g $-A* JU=\H"7a9 s^Յ\>OȥVV0]YqGkj \ȄRkU-Тu ĉ'KE5 iT'a+Z~h$meIJ8 {X2WPڑ+3_UI6:AI 8+ $\TN[HII.'F(ɢ"bܣ̼{jOc0@H4D4i%ԠD!':0$'M QFf;\*H')#).acHk7M *N cR$}i>[vUIT6>:!tndwɜ?BU/B5N$)ASci%3JRA[2̚wIĝC}fr\ٔMmg۹|{~sU1d>nSa]EtOM$ i\ 2Ѥ4'0;ODL ( Jq!N! T ? 00js[;xy۲hHsKO]p*G D"H*J?39wG!jϬ)w5p~SOkGl~̬6n=U~bJJ2/VzCr)Neb%2aFGuQtSxyg-tGC ^IhJIJ9 {@Z C!xB):a JHBԞ8mrcUp t+)A85d:=8޳sM;,+p@~ DD5LE GF}Shkxǵ!{UolY)g*ɾ^Xǹo:z|ˋs]Lx{,?.IJ].O jbVACp*.k7]*~>SJ2E[V(Lz|ev}6cO9>+s9n|崄K`ߕh\(GuhR<&< yMw7Yv'nуV=?iFe y{wxoΆ׏m7,v\{pjavz^j>}UٝYo0`w6~f2 -ou6o54Grcu5bv??{ƍ K/Ijk$\7W!k{T%{N˦TJ)R!V6fHx8(r؜!и5ntCBMɜU9,PJJtQW_yN* ٨,E]ei9tuԬP]Iɉ4H]T?tⲇ6SWYZ%]]uU+-]c%=CTc .;o~dr~&$~3tGyE-S2V},xNE*pT5l>->ymĸ3Zۜ(5LS ʺ<P9/x(7 խ]ZG2L:v8ʫ( Įӻ!,̞M:!Xpn݉gs9ϊ ]ӀzNGGfD{u]Jhx~owzٻ'X/NcWޣT;8 po5 oWyMq%ObЯǟjlAsj7ٿcgCsY>=|*7Tl-T-iV\N!OX"v7wFByPziW7cTMɳj^{ߣw4h7&H⬛/Êa&ϬI"#tAlM?gyTϖJк]Z&G}ǝ2(v#Îyg֛;{@EEo:߽\c0Jt (펿b7:]sm<-ŵZiv9$Yf;2,1cLR^xKf/<ĔgtaJX<U/LRr{xaHPqR*>~o ITIP1*'4yO5@0kz졬a8k8)m h PN) IB@e&^3Ѧ]:GQ v )K$Epk1ٲ5ܚ8;F6MN;?g&InT~py=ݻ MGmMJ ܒd3שT-ef}n)z ^r\9h^٥[(Z .m۶}УcMӜvռbFnIk{N.־/vS)6X!%Ͻy>  MnϠ 6mo sR'R L1Ss髛 _L_v+%@D>ϊ;܉s[q%>+PUy WάMspmZ0҃p\3*A4%y題tuF<8#]H$_xԉ(& !Yph#hnI@ZXh.P፧:c\k\ "8YJ#SFzkB}^/||q9~sY~o!ۊozl Sf`n է>YtP[Ó*9+!mdpeEHZWDT.j[9 SVP>*sP^p>nYPZCC΁fJ;KIzGdHIg5&0Jۍ.|'g ۩B: ko75BmooʊcCaHA)(i7vs,RA9Yimb9UHGqf+I ShDqpE#):VZkpE*Ó8-UѓM\"N"v&Q'qqoP"*pW޾|ؘf>|G͹Xm{ɥ`uzM: <6R).w4tY, zB Ikja +G'U+]rVb<}ɇv/y*~ z}qFӚW`u" tX5{X5\?U#[5k[־Uc4%IYs$ s݃@*c2VH1ZVʗ!ӫ!Mɝ"xy˻z,牠O>mkVs&-Z| /ZA)E+QUlrS/)ьʡdY,5ɒ9Cm(\K܇7ɘ $猓BxVɊ x0"' '{M;Yn?tc|*{5>o޵2=[yОN ("óQ$FT@t C21FFC)moEK(F@bLx-.my&g} 92}lWI۔ڦhrŲ|"Rj :ٰeq)<6,-g&QľB61(n^;¦p!C8]RSb)+S:GS2&C-+"Z\QrW= XOAM1zX.ӟ*ƌ'J"!|sՎI-DϹ?2kkK$I#׈a!#eW!YJVA-ӔBZwfeȓU5 ՝)BA2p2rj39ҙ, 2!V]\S5^=uap]+ЎBna]>^8 kULqQtMC~)/ѥ39v>] `iYJnznc;x].|7K[+I/aϾk>ŋ[GҚk4tnZfITEB͚5\^q[HBzրО۝|*u6O~|5>|j@s-F-̅^WnnuQӬոչ:/u5͍rM VtAj'[\!̽wAw3m݋xCkkedsFu][9BfV( > }.j+u =Asp{GB5Jºqw/q:~緧o^Mݏ߿y{J>}Wowo^׊Xѱv֦N4nݵ;92_`[c[]pd|i_Aq\4QOX-^ljoZ5װCՂ4إ^hkrQV!@:|~fycA %_qC BqDɳǿX$$S[ 8LKU/OuГ^h_WeR$(v!]e,F#d0\;I ̥F\>Mȥ0j ӭx󖢚<PT:YZVY qpB"C4Pk+틋ĝ.cIKd9P/Yf88n' xfcpe%)Ĩ5cw:A 10vN[l}Sr_S@}RQ/u@RB5@$-Dc-a$zR$ӎgHE݅ eJQ"mjOTHcx[9QzZ߾E wni^g+겫J "}$V(<}I94SP'H46M"Pe-S{CXH *Nd30zŨ1|J{p#HOEYcƆ9,QO־ 읜+ T܁O4+3Fk]٥ 96δ8>r'AD*ιYmS @Z}lXېK/}vʉ4}RڻHEl]"nZDpHZu8.ع͗^u s}1kB#;oc'Zfl?5Y6\z}^h]moI+aکa.3s v=,p,y$ىwqXݒ,;,%GN*[ݪbW,Eq rݳ ;w:-'166duK_oF͹{P`'̭6Zl4Fz!R6:-`uHG"5"xmd>5ju}WV׌ 2l-%Ik pZ)CTQRFe:%*EKZy*ck}q߁tt;錟#~CK8ZCfܸ~(lٖM-vj}#SSAMq3-ܠ &= 1JFp_7C }8A'yp'Zނ!%x`RJJ9)jgRƔ8卢F8MeBPHH# L } {KDФ.yg+*}T-Op7:$&OeCꨈJH.A"1KN 
+g4s@NpTb ڲC*dȄ$i-R@B^kbBaA-F݂  R7=6PjɎU\ڵ?ڔ㫶zM[o3tm"Cή1Rv6G9kS+hc(BiКFkIQu={@1i=4}@!F(=oeLL2T=L0Rd=(l*!*s.4֌٢ؠRL]u. eՅ{Յ M/e"؆N'\M jw74n0}'\c'2tR/L1|.,9iVK 2mY^ÓeΞp48j++Ӕ-NcS΄V!mJ"nGøb.jmSXkgK2Ì'(g. zT2 T' R. B@/M.x#@-C*Ћ4E * 1H:ՠ]ibܭ[>Lb<mahFq#|/MB F89N5YB0_R]1H$&)Z>(yI4!'̈́NSr[aX5珀ڣ^43:Xg1.Q/ӋTXfZ+6 GJ-)Y Cg*lzd g?et6s; f?~G!U׶hr,U-lJ Sf/UDHꃦk0!59psW3ʚDHa<R\"9DHs.z"D(!Bӑmi;+ Z V5 2 H%4i-L%ʬbN0p5=]bKe"ػМG_l=gxgaҬ/jw{|9ǧ`P%G^Q)Ƅ>HZ$BB:4.#"À9(H)vj=b,9A]`9w ꚵ:^ s%2ܼڳAB3?]${3G7v+Gu *G`̣yT0 /_nicBPm QkX+G`̣yT0 Q<*G *G`̣yT0 Q<*G̣yT0 Q5b`̣yTX<*G`̣yT0ُvُ+GM`̣yT0 Q<*G`̣yT0]9%0Kcg!F; nqFII۫ LqmQ#M:z&sC[:/je$>Z'm'V ḱ j:ylJ Pa{thFּ# ||^hDha:sP.HJ@&ai DH!(T]wuV[tFc`ZHN " N$c (8PكG!Aw_yPp CZVu1Z0EtQzHhCDx #4Y㈡7a1?8 JGEce'(#lh!QvZS crRmH0yTVvIhX;PId1HF) q{|}Jku[¼ALʭӃ6u;ZooKQ&q 5! vr1GENa}nB16[ MY &.?qIݿ.+m) Ɩ׷˱Uq">n996G15a4ɎVSdg~pu<"<;mfE ,Zrr;Vk r.H?I$N3}~X_Fƞ s{ zU&U•}1k6^?6=;Q|[.ęV  a*cJ7xYc eSJP=&.4ى/?NU6Uon$52hJth%3JRNe5AarY P0Ao. ?Om>_Ӆv{ҽbZ6׎)IJ&@) QEiNvnWí Ϻ<̬j3ੵydc΃e^R K  e=ŒֱFQO3O?ƪz֪y7+IMs Y 2JXHc=a PT9dA,Y8}c iHDZtg='ug%<:'I T*T/&$`֛(a8dc.enƼ TBgx'~jYpbIc>" 45r"Mh$K(:YLhTŔд][5 ޤd`ݷܨfgtk[mkj C= c@Kzn{pvfw@lt_UmwSv]m5:_5:#CMoqn )CuC\kG[)[Úuj#|͈^yg[-96s-msWǕkn;?m{-F-Z|7խy6X~_@kt˷!֍Mkra:_  +~bh / `tw4Z -6oX ?L'[]@G[n<xprǘ7ߓ|ܿwٍ{&i:/]@f;=q;ᝍuޤcafs6L4qgCܛw(X*Ri '8a DD5|ʖ47 h-zuλb!6xl90  v*V/h:d8r3uIi-!F%ZX%ijI^̓.4zGC h-Ĵ/=j˷8' E¥Gol4zpwߎP ?׋1JEarR^;KnTѭ[ r/֍=f~՘mu)~ WZЃk5!^[%[oӮi[FEM1@FE+Qvĝ7I}3B?QZ$cb=~T] ?ET[1RVƏ\9YWM|| okaPgz?$՘۫\KԒrl*j{`]q(Q/HL.M5n˼Ƨ&)r#ԝlDh8Hk *U9DJ9<~R(O 69T V9 x)֛Rp+j2v3;)~̼z+OdN`c$1KoJpyFM P{Rl\PWS} Z=!`$XC .]b=g#c|3=D K|Ѧ`YL m J1̍\+Ԓ]dP@V焔 !]֥H^Xdʐai`YcL{\kp/ϡ&+6(<# TYoްpLPҝc^-W˰Y7>aKe i!zi48$YH1F\ʎO,r/D/{b/E/(er$I^k2ɢt+J3pBBuP(n8KIIxlJ9$3̐2pVz=W-VrdC_p$ nwi6`gf;d0;YHt(דSo?VO; 7BkBt"}?'3>E76;.aդEot~{~8[c7}{O^\܆GKWnnpfg{O-mkA-}b2u˃)B./T )MsV٥N^tWw>߃>_Jzm~ mΤum^U$/hr6+A?]d߂H Õ'FJ[ >HF'iAzCSD>rN#(Dz%Q0dSdm4)RE9QqQݮ͎Ȍ F:Yb>2l`&YBFg}2PBSVr43JEzkpv\WjyzFs'd58jX \6hLSW bEiFbzz Mwwi<6<v2db1Jq+`d҂>sa >>Ebx.YT9b3ŵ  $71$8)UeHM匵]  g3tD˧6-X68fhe^^8 0ovkǞ˞-JyQmL,vO UH bk-YO'M9y Mi+qz#ٓB!OC>oO;&ǵɤa01s|g)m?ӌor*[njr)^$F%T;eoDLKS5ѵ/_(7m r෽9QT+򳿗דz]wI#Wy-7Fp<'4O`{U U3 mfE{Q5kK־2ѬK$HY 52݃@ikK(k=4[мki^?!-ƂOI} ߲?,Ã`O}D}Vv((fFv&(Tt%hA+-(AF`R~B9>G[Pm9'ij p퉋A Q}ȡ' ?Vp-56 fRd!{o2Nf)C&܋Ж(3>&EVW xrF$f"U2- >.j|=&=4(46' }l~ : EsNWƨ:ib&s` %.F:%TM85>1#5Qq:hmrуehwV%˘1g"8Ш$35H֜ˠNu-lWߡ5[:ۿzMkEy'3OsezY^R{N'It[YauY팸pD\Qy&o>j44"EXiq?ELS.0Cљ @˗<|v풹BU+UWUA\zs퐹*o팹*p9특*hKizĹVڝVOvlve}6;XKTQ팟ZAeO U(;d `Н1W̮D_zUA+c.wzIOQ-F5ӧuWc7~eM1k|i7BSQ9x[ im8x\>|J&%ZNU^1VVHDJ *?\ yS ~_WV22x)âBbkܬLwN+EvxOfJX3)\gլ.]o^;)Tf}G}ᎺnWZ=ɼTX-cJ`ɧ'R)̡5#kuXHFT<-ZhGǵX/(O1z19H8S#,56*2igtxWtA+tA$z3v̴@RXf:c \nb ZPn J{sPm퐹j*p#-.(94WZk$%JA&:c \κb Zq쪠4WD2W쎹*p승*h ;vsUP\Esݥ+liw G[x$E[x$#Л=xA`rp9Hvqix|R_oBqu,,^ro7rv\i©f xX>̳: ==}fV_ulׅ_-9;٥\m:Їग़̏ne'u Δe#eiA%%cRp*1((PӅA.qG+猯$?.T*]wXuTf3 ~@֝x[x[P78`i^,JwҼ:&e "1O U@rxQr%+c: ?S%ڒ(K4MtFct^d+ 3mE6Zo$wZPR<ĩ3 md~ J"T:VdMp;'4 جLQ kDJ%S*cwN&hx }iߴ4uw7Kd62:~ۧ0ِܣR/x y}^>/x ӕK>xDZբ+{ Z{X`8ZKDL"$0& !T#^#`&ort2[5j"*$Sl.HL sR/匵;ك9QR >쨉2Dz@!  
ڨP dlƷ,[]*[ycȆ;4 |}{d @k[^̵)؎숝R |)BhטgF;$sZ<6MłF/n/'͠fr`YK_ʵ{E=5A[G겗76hlbB`Vi) D(RkSu5,c@n/Ҡueļ!_zVwG {ww]7uqi0,W+nvza(wxו]h> \]]X\|So[@B=Ĵ1-~9_[UZ4H3[G9<~R-.I 2@-[>:u*uaupS.p-P%IkR2D'R*U%mO$d Xʔ%ȱZp-Ь^Ǥ{^f]_eyj r?z7lnzzz(P C8Rv}ʥPGC=bwH<\?Ŧq+Xcbǃ/msFJDdpxd'I 7!plBCIX4MH\6AdL..٫`Y *깔1g`SިZ3QwmHwiwsep;d ,60Rt%%';b%D-2ȱYMV)VϬjЈAZen b DHч@JYJd \ >w*h4R 3֪= #[f?P#ǿNp!eX$P.Qa 嚋bL_?ՈFtxЈk AF/E!Y( 1wLh`(I ,猼Ft#I6 <=RV"I6FB\ Xq+kjlPRҋӦ}uV-udxЋ8FpNJcH@FBs)f!B.d%4.zqzq_a5Vp˳eT꾦{'q ڵa2vwR6mG?]MKZ؜pIW x`46xU#dbA8JQ {.Q ݋@H"*, p>fd;q4,ee 5p&Jҷz,a[.}Q?g_77y*??>A YYB'-hi|@HPEH .e"A*tiu.%La~C<97{*Ϻ{g֛vJUu!EZ >̳(14 V*R58u(Fq[ѷS殶`S{4m%!';}dǓmAXhu_4LBr}`m_`NIu*HpAkᡪmtBY2z xD%! M(!\9C 99j]U#gC=2əQedZi槓Wz!׍e79 MN^,ˍm.7JY4JDMF6P9}TPei \$H,A]dx>X[ uQzyZK`4En4rC`r I5$= IH}7: +-Cu*`tº EcF`Rsd"Z@:Av3{ 1z9ni[(6I̹L;dT,#Yk )L^th,XA+H7 k8;%J)EѨe@AiիZKWq6ZW4d^i?uаF)p1]q8 m)xM;?XciTbz#3cŗnœISKϷ+U &n2T,bJ-xS< ˣ\=[k5C/rtdE0x,X\M3N-M4!x]r\8 dzB>m micz^~oI7K4]_Nnk%SǎmhE߫^K*RfZ +>̐+SinOz0o^܌k}mه׍89=shxa>]5GY[?^Nǯ$c}#][#IzӾax0 w0x(2e'G^9<ּ .^oԾwri9|OXʃ G0BaSt':l8?b?߼/ox=~߽v? 0$Xqū-ƥmV_.a~Nc9\D8J^8 +D?1p:]}"2_Le4bhaC+V;6d\Y{*ԪKvF@_M9O3u#ݴ$WšX]TJc Fos5d&pmh:$޸lMl !*G Mywy!c{mO2IAJY Ǭ  @VKnJ"j3PUbuԖ;)qUj7=~U03%~<-ڭGznOV 8xe+i(C 7J.&\) Iy$IPbJ^F؉F(ܱ>)}P E1pp墚IECxJ &|ڳH+WTv[oR8hSӔ,a:ZA-w졈6O#+]j#2Q9"'`I֢"JxI5C#<3A_o^읍 R9Yd i# 8Dk ,^%EH tʙLpu-Fz1l~ѡ?,b#̵EN6ǩ+=q*F ~~*_]*&N>et[7]dMLmu)a.y_j3*WAofߜ1[ھn lI (uw_؀ 3Cm2ܐޣ_Z<6Wһټ9~nUm01Rm8]axk8N܃^s]lr~R&h6i8J4ݼﺟ~,,'K#K|snX L_ݛ]V4_31S2>옄]}]CQ_atgl<ϛmv 8;~Dm5rCqy&=Ե︸zI ocQ9嚁޼S}q @"jx m="zGlu:LRp@3 3k0&7\c> Z2DFt"qS8‘3P<]9P1R2!]6<y¢!qɹ xx,͜(14{UolYRȺ*ʵhz!'*#@uAň/2 y<&ܸ&h5`9rw,߷H|$5~$;w( n%O='?/%+u{;Ag7<}K,ZneX>O.sVW!=qEʎ"zV8aRr"\6՚䉳s1i1u|%åb a#2=%1NyFgn^żiEz.2MŐ>A+{}ľ\ȝ_H?>ZiwKT|٬+e0RmQ9Kl[cswJ&+}PHz0NRt!=[%]ӛv{E{>inaӳom$w=.=$ ɘvڝ{]QR(٨F-8By]mo[7+l9@>N-^4]ࢅW[,]s$[ؑMCrp8%VdaaZ(HgG8nbX)l2-b !H>lb 4r)xkb&0dtLy?ڲZtu΂nGW4^:AOcyw^QTFv:wM\}9ʢoMmx>/QuOoOx|k8송fZ`F~Kҽ,@ZTO*TYW [Q m*4JV .]TsŖ'ۜC}(uE)+9e#5r)QgPc|q9Ufs}\YV6Z4Zϭbp^MmCMUԻ T뫟F_LL2 ѳJ(@Pb?;̌kNt7Dqc)3y7`ܼIhwp~̐H8ۛ+a. ߙ:kĮ\)ru 3u:uA]WXsȅmpP+Ur/PjCpE݁B-WJ!p•(.iWD*r+pUEv*Tl{rJ -!" v fg|WDdjP9a՗Wf \rhWD-w* +Su˛ Zu$CL G-aMA;q[rGGΟ>L9TΎ+]qm xep0a#81XK+e,p4Y"|[gLSq?|q:3[/[n";-`?هU qm(zmZ .PgLm'C7:/ o1jsب3 +OͿxl->sq@}|&rIx6mz،kuH-Mi\\ 5􀡕$ՇƃautcbO~MU*ŨBݏ,QG5$myeǗߵJPU*\3*mZ MIIcۘvYaj5wƝ98:jYb0 I[Gd1˦ ;h!G5ᣚ[Zbu{|{0?EKvU ż78Yπl6$s¬FY*Nt;B_wpaU4~s9a+kD-IR=%esCɲ4 GUcoUFzP,Ug-9+ V^Ҿ> :M#BDiRPKkR2D2LE$t 1h2PRXkqr)wrg|l .ܓ|͟|[Nm6;ͧFcP;tUATUjWθ f vEq0} .6zuf~F=?` d +.U~$oDĒ[WowrV6. O&[d*X(MKYξqU|zd&{Ē'\[bº[P*FJw' *3B2I=JBITNiΰ E:-c>* YAdD O1zKf&,ZPY1!Yix?;#s#iY䎯J-v]\޻-On^r׏OS%[EoVrJGTB!lMFJ G/$&}RZybHVQc޳r@Y'I1[# !($`$m#ckFtְ5 uX(XXx'hGIj&yt= h7/Tu?3#vVAHiCHwR:&В &;-2E%4R1bYFdi(ƞ R/) }mԙl;flhZEgiXV[Fl?s[P5M˨m`l9Xn,嬕I 6@[F@/(UR3r򤤠2U<̤4$ 4C@H&&F D'YFȨNڹ5rVamޥ[b[cӷ"m96HѠcԪUG,SxB2N/Y9vSi`L *I"fAD<$DYp0xܶSiEjD|h=hZ_g|qɆ]\D=..xZDm" $FB#1d!L\`F%DE"ŧmָcS<Ėpó'1gUkY>Xkkni=qoZzOFR:j!|VBr帓@obN Of!xF3Tf+!] L(Sf>%]2хlct'#Cm!JRkzЪf͑{ 5ʌ) 0>&IyrF$0,]f̶YFΚCBu9| x{DEjwv<~{a.1S/*|t]5=~epEY6KmgmOdZfrӶeN%H+ـwY5wOk̳IT%_F*',TJJ8fVVHrSx[~FO,ɝŮ' l녁ڢ.ڐjlEY癕N+=:{lΚVus|Ґҫ(Pġ8ㅐIĥմ{t!GbGW}[G3BI7T̏WÐ^O>YErR*kaPI߼`Yhir1*Z#-C-E`\甂%T X JNCb(⌑Xê@kRI^n{ڤ o *qMQgpAgmfcQk4&H!b~Zmoq+5C % vb6^XG?hm`*]$@֧SԒ d\E2A13c9ٿ$ssmM JG2șd3cH4J(tۤ F{AjW6 Z)r;JFF4Jiq* Fꤷ{%4󃃸k-KT [3^4)ZC㯺ظ[濓EW+RgיRNDh*O`%q|y҅qÌ:5]OjS&5y?''޹gPبC0dP䇗L+!@C7 ߻> /SVgӿb!qW4X.tRMOVސiBN '~7Ӏq~z_{ަΥd2K4/WhJ# :x0u7m{ްA7?h8/7CTSlC޹3۝f_'?O>8Uc)Us>t=;oUyqYAO]fb{[j .uv#v3(Icj,XvAw=m݋x$r hl!{u|%?úՈ+wG3hB`XiT%*7Mc~ĎקN^߾z} ܜ|wroi8vw! 
$>s&V S+>|ۏ/P@94׏8 O p:aĢiǬ̃ WU[]SӮ5QO`~!/ oW9\Ħ)uUu1Hw5Ka͟X]|醒{,dٙR 4s)@?~oN Г 8Lѥm:Llyf:/ʚE-;<88Ha\a$;r[n"DbݛHLmr㝭kbry\ӽA9z>0Y9꧉aLC?|RTS(3J+mHO{Zy 431k3GS"ejz}#IK5+8%4ؘ2 !,)gYOlQ|<.X)͌6( < k P? kk6%.cV g4+s(joF.%){HMLc1>"'^qϷd))#!q'*(T><̍d( ,pPr\l>ہPMU#2\*RJlRXEd R~Bp,"EDM7{,<5o-߲^fD_-?bXGxlكڃxֻkz퀴.\*TOV'nRJ}RJwX^sz<Ց2AD"FCaJJ,1bnTTƭ|asglySc?Ƅ%I#7)7*!IZc.!aRpvrer69hCG*V]Y9xX̶zEKhuJcd$2; k| "# 6P9HHe' u*邧간w\X$SKD3 l<2]gt=`wZcL3<$nerUZ'dnZWۮ累yo6}Y7E˞?r1"P[ B4/`2^!n='} o]kl.2)#QQ%Hτĉ@%3zj0ѠOZjD & _.ƏIcr@8fQҒʐ"<|Xl'>=Emzv/!.oe*} 7f⛤XߊI&_qjZEz;lT1GR]a/V=|Er$/H2|,IFwrR(:yYFV#- Ue-"H BkG ]{{t[6,]_ ؐRz:tͯ—:ƢI?ZN#Ix=`Ԏ y%/;#kX.]%:l%I|QPV%|ԡ|HN k֣.o DsR=WMn)A<<*5/­oMy?meNS~sf )Ƣ^ uh6|=].svMgfMFM CZiuxy"~6m(a^/5H$ <"|"ⵓZfML0Pc -58ԍ/W xM#I*KT~™d_ TO!=}gs;<.1Q$^|B$G*pg=ko֤ "FZQ>,B/@^ɶ6,5BYR# &fąU=΍H܈[R<By@^7Y^ޏ*2͛p;dHya]Q 2)Mk+7>~ ˺ƚ1sBLb)ȹJI/d(/Ƹ4!2,qt L F\^F4!@gJI*h&Z(!:@N!)R:oC ApHYu2L-`^k5f,`ZF FS^fc]|r=MGmx6Uuͻ |9k-u;<]W^V4nS;ݢ҃[LeN+LB= ?{`u1a KԺ[g:<:}S1AKk-t''Ck~:݊>b_=@1?9(j{ ۮ˦] ;u ɯsur=篠]knnO7' eg+ z" r& J*"lɑG%q\2f*׵s)J蝒.{^(zY*"<\{$.TѨbmJzsd0i oMJ鸵^ON}9+\ZPk(Դp{B#`^脫$atVe5Z w՟Aw&ߥԄ3Gv:{-~6|矊~fR=[@ۭKջ3Yќz@gtv5[q^10+xor\^HnӢ$JђdKk,)Q?p2={'{t-"[c>takEP (t,e(Pd3Qj1֊QY4PL8S}kv*4Um6<+m!8hjԤX0N mtGBf`PJ-4:H:@!ТjLJᴣJ@:ʻYV[sJ( # (oR\ .ON 'UgKhxm\;_MݦSPA)_/?8W>PAz ÔOWE X"HـHyA@1NJ|4٠"hA4L rF**XJB;8;wN MXno+B⚈fyxK)fj|Ʀ{_AMaIx(pTZۓzPv=c(xħtF /ޙ6~al =}:XU:+v9UZ@%7zSvTrhxןS瓦r4L)yr84La;jfLJJ-"6>l VZߤoPreKfV"5Ag*CAv3Ń S+]ʗ=o2Fcl~D*kX0#73N;!!vD0hx*Hq^|/N\+?JP݁/,5xe\W"g?h= a?x8&˘D0ѳIkyB)u-Ĕ#"oA!]. l?U([5aiTi81&7 A& n8+Ѡ(c-eѽpJ˵S4s+dJ`e< 9'0@+  3;(@jؼy6o޾M4*]ozkapX@Z|;7o8e>BĮo%U=3qI}({*"&cm2B+6f)CxDsC8O+~?na4< ;)0WU8WͅR i`X뻘N2VtVоĥ4;}DVq ߌJgv:zJo/7zHO#yp0jH|gpH%nms!guwU{uFUU~Q*:EfQ֫WWRK|t|;7)?]p"bέ퟽t|TJC+֮a-thwʸO륫Nޢ;N.f'G)g%_ON`\`]]ҙ!ݷ~ds8omD}gl?Btqyv'x<vLMR?o7>Wip|r&՟w7)=7sMYۨlٰܣ>5 -wefg5uon?5_gg0ྎ3f.GfOVYRφ5[k\]h[v}I3zջc־l7gdn}yo?ef_/VKٛuU*[G QRvִ`/Ng,i"\ΛzV K.c4PjMmr)f W֛T>_ctl T9q{'ZJQ+/6{Gb:0JHkf$]^x Y̼cc $$ eFjuV":J(k! e 1P #qҰI&}EՊX{Buf{ M/ĥj*٠db̠JUYa:iB0`s#v3TO>\E3VZF [ŬΎ6D m3bka&a=Cw / TxU2GH}1[Zm\U&1:#y0_(Xq=:!Pqv/1E5H#VF/C̓pFL"(يG+^ca,A,J3-VUBi5eHTyPZG(o<*"4pH+C}Cշlb1 +Ikj]Є:Hg 9)E*aX0jH c,1@Z*(.FR<&z U @*cKULmqLG[Pc@sກ6[[1syO jETAZQk|%Z=Lb2Վ *WZ@׶{[>7p5801^򿺽-7לAPK}4 ŢW>ڡ*`)o"a2نf@2~.6O 4%鰀l* 6_4fs˥ZpUY.!b!c-ٱPΔZ&T$bDP!j}rQ!bՁsקmfIiGgC@'krpZq]h{'`8 (P'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qp@enUN Zύ ׬ 3FgGN Q@Q@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $Nu)Zh''В{'P NCt%|'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N q`@ŭ Vq#ҵ8N FNCta8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@tkz?\0x{[M՛}wsvC?@kjQ=pz&TJ+?[q p]q hiK@G M~'g?)O%~H?y| /z.vD/m[~%M; G7>LFLl & EW׿wJ==Σ1ҊfkWsz߹Qu!bq5R2nNt(Hɯ޾&>;]=_pqp{^zZF,t8vd}]Y'}tIݪ'+DKӏg#b7u[4۸[!$Vvx7G? }] FWF }1uMcT\I0@L =>Ne[j-Mf-hmq(l{(vw=/It UoCyGyjybyB3#A7GgWom wܤd/ʽ^淟nU+ijhFZ4$>@6++jʲn5k8о^HYmVDW ؅a-th2i+Gvz5tp^ ]1e'dHW5nEtŀzAVCW@Kz׮IBWHW^]1VCW|(z֮{u(w[c 5+KQW(Cެ_~3VhP%K_]^*Q8^j\r^=HCN/2qxuctן'[KƘDޘ V.ߟ=H>`4]^ӬѐYMI&.ٵ4 jiQۮM鿄7iwNW 7ЕteJ,%tutmT\BL|?~?yv7V4 ~d!`sJ_j!̷ӝ w~ ě7/ ދ7D?2);w| so؞=6Iw?{WF쿊<`wiG 0d7` M`5%$q滿bwZ$ʡY$*Y4KS|\헚g'ډ?E2N,6d a!.B@ p -Ea@8MbY* DYA/Qy):OFY*kY B)fo3Jj~c<%n¦6C֜PSXۡ~bUX}7}f-Qt\g`c>*{ڢO>#:I:f1ZmYW\ٻ+~-/OO=q(ND@ & !Yp tmVYJE3ҳ͉Y3"K5>W)ÜhF`rP?WP|CWycW}D^MJ׃e)Y/vr>tr9<-}K>.gKkZSSAa/GIX_"\SNctyaeXM~:wۘf.ҧKswghܽ.h8BQKpƏPN'JgMU;9\١z;. ݷݒvZ%fYfF9 <,򤩠u@#  fw`<5`vWàCÛYMwr޹+j/;x)p5â.G~͈M QC9:T<  {Y˵pAb"&Bޢ$сU`*Dfvа }nNU,~" n3ՀjfxJb7 ?tnc3ry!A"Ng8ȥ.MS_oি8 `0gkj[U:/Uo,J"Vmk|1-yo>"pkqٌPX5Q i2ZA]#z&Hŕ!CW~?AZL5 J( Ct6#`z*?p;8? 
?K*荥R#ޢNBJY.Dv)iJ0 BPƹIB8U&*$\gSG'¿HZFvjbIC?wY5nvڍoy]:pݱͮ;d빿n|!3AL!f}XPsV`EHH҅( x?4yO%F%LJ$)<:ԗ69I( lkmHRhc,NScp@C뜡DB0*Qi[#Tɖekl%t,m\|[nߎn ?nPu0ZP,fav6}!I4\YRaN-MDŽ,W?HC2kݳSj F2˔vpA%ڞIXWGEV2B3 X D0QǓE<39;?(2Rx6U)zbL %bl;ej nbLxDGτ7{~ؓԠ/L.g_IL8y2ROR'A'3֐ӵO?8ȻD--/g0Fm\D$jV!8 W2 o;[_M8]E1K(v)116Iqa{8.qT@/۸H Ȃi :VNzGX5\ɪYlh/VMcڷjf $)|pxb$(ڹFH 1"Hc8iΪ:0J0إ/șB6lyq¦3mJ_yq9Zy>DF-Ԩ `6xQ8練hƏGglgl sh[s-!r$'cb23N ὖZ%+ "*j7sjr^Yph-BȃEtb@9@L@:ey?+ĈvaR&V%A;kd4il>筶T"u R /EJؖ%5rKZlRC+_Nx)#vѲ|&˒ 3 fOyM zŰg */>2s-[-Z. d,@TXfEi&S̶}ض%\rЇc[>mK*:q1Rd,!T q T(G+-A=ܶ^tKrzФ(J^Rv{׿_ϼ$qg$D~0SS ʺȅh~P\Ka< MHHpتVbfw5[85 y12&N72i3@E\aKڏe4Xw[3ٮ@ӖRj–SW7h37=ɦT.Q̠R gMѱOT"4i49Fr$y] q+#UoP-YfxYߔ?Ɓ܎e/!tǼbWe.#;>Sw/~N%FzQ R w,7>i߭3{%2%ˤ*qYaVe)9a#X=U+pqtcQs|o%PGYXnh!p$Sz>ȁE|}O gVkhŎG; F>|XH㚖^Qͱ@\0{E>IgpS^jSE&w]P3~v)} e0J^ZG'bPf8x'TA\%;JrN?qtۓI왢~7WK- w%oyBD,!(#f[?/<˜ʡ*?F_\FCvp^Жʤ%?-^X<|PZsNR>;uDU$Ԭ[縄J\Cg?uNU<]kūy"8[CAsi?ux^+7q]p!IR_l`{=lb}KMͰf,2[RGQ+߻xf;9AeclnuɦV2Vy{0N#7,}9>1ɻHv+nkT"OVUW;;Qph:GCiv)J|H^] "zo712)@;ҐraGA@ <;H!LނH7vqr5|$ Ēj V 0gu&a cw>ͱ+αkp1y"&igK6:Ik)%@$Z[ZD$r/HDyR$m@j/.9JH ,a4hP{BbÃ*7HOd*>Ҡ0vA*-X'LC:#",_ea1$UтmT>X5[$.g\ \d8Q)DRSh)yÅHp#<G; j% ]mo9+B>c p{{ -$5q$$'_HZjT,g$d)t"Ϳh~pfP|@Z.L xUsliq|Jr4sgUwj2AKrW=؞ 1䔽[Ǥ+'Xxx?5zȑr~U,^Ck}\OWq6Yad~b{v26vrkW߽fY){=-̈?Z䦫-`9l GĖ \_/r|tku@vŶ"BZj ŒWS=k)U g-u-[,.$1ħ]ί)ls~#)iKx. 82EaM"5"xmd>u4Ie5.*51pj Qϝ{ "锨P%F5,>q߀W8_<&}^o3,P3_o7wۦ':bx,>^W㠃A ec/%%pMrқ-F" qv+ծgQ"x .&)뙔QLt 1L 1{{K%`u4KYJ{ KSgd^ 0?/h9ip%$Qc T4&ΝIu,pb:N(,SP3 eBY45! D3.951D-/msqxXhsI-6NLl*S.Zʆ24^߰w;U7"k$egs\* =EU4SBk .Y&E\mHSq*e$x+c q` pDUNGPTqc}]X2SgDnT1RB]j Oj 5϶xHSw<[4zz\};B´|AJGv-b2Sʢk"UI$l69V̠:oGM9qCX')lsb 6\sFDZVڦZ`מ f< F9keYpB[JFŀHŔP&µ'&TȐ m DS12T.kY$Nu9l[~]41ƳKc--lm"x.hpD(8LHg y/DxP:@RJ,ɝ5"7UIiA gEMI3TV"SEtU{˦\Yli{bvQj]mBhTHpR{R8!$AFEe1Is!Vx x.8ȳݲwUЮjН1S!p h G~dBя#V?;qLxGyA^9FTAI"AS$_gbT%m-:J֡l5%֞RZDL-'j[.JYŜ`q.[k( υwY?ߵu]S? 8oVvsqnOs \Ӱlӆ.;}z`s%(38M@) ͅE'H%8E8EcdT5 G!w@5m Hh4@*.8J#'$'(BH<[c"@!ʉ\Ղ֜I)3xsORD%' a(a0m;Ayx5>.ɇS5 Bqv`bl6 <>6x7nxhFd!F22>Œ#CcQ O3_AS-Ϸfy"JDS<Ĭ2J1Q(-5x08ׂ<BcĤ׹pE@SBfYS8INmh10g!CJ= 0,JUUzdrfpEPK1k|q=/pj۠E0Q6C^:锨#Q.81KfVz-^XlH>GISKĚ!(3e|?]޸KA~={m9c\9~LSnGt#;'n>s˟0h@#MjkyͥZonr )gPu>Ӆq4GGOeZE~%v|H1]QcEQ#w#RD>i)dN>jQod٨ ;{K٧6<$'I{N;Sf-4'4[hng+%zW'VA\h(n>,͖dn9ݓbvM'V]' tP>v/ K ²&K;nύ; [O;ew7k%6}ϭ=4gnT GbBTDse!О8mX\:')ܧ^<(68%nKʇ󙏠oČp`MݷV\ixR9S9*g_쫜}fVjW9*mN5 rU P9*g_쫜}ZW9*g_쫜}rUξW9*g+ ~1\E.~}+|~=^915~ >G2&TUn~5zsxiplJibrM '=̬9p*2}s &T B <6 jmvD)1 Ԙ`3F`5ѪD= C&NY( ;|ozf 6gݼB aHo~0LUa6r1^R3ViZ\ ]hлW]&%4Ypii0|=%#gN ,uSYt9.g550$ h1 N(!U$ -WoI2F 'Gńw2*5p ,yXx:#ޮp56v;חmŞM|^(NiH5PTҁ\$c`NF-&0 e2鍡jLǃ#$: !Jy4qt Oi1u:A7,fNs,ܢihOќ,i,rO&^HV?{Fl/ l|b`1ܙ`2G,y-;[l=d+r˖N[n5٬9bUjWm( ߚr7kk|J)$cW l2. X*l CX- *{ {M|:U Xu6@6c%lyܽ8a m;3cs27_J>G#tƉ_{zf9c,XHМK'; )ƦIx~&!V6~2G亞!Jy{cF l^&2 !h2 6S3O)zs"{c"/в Uܬͤݲ)66GW.0=`8?>G6Z4?6wrkU~n9z?UeDn/K4?;Niu M_ d@ΩTOR=HGICc40w]9 y(D@!Fm|T-nЃV9dǢyT`Q\p)GSD@@Ŋb\Ƿڥ }+{VcGa{|lI>O}PjT( ?$zUG%ciy$zUJm$z`s NBz<zEnN:`mPһ&P.6!'ĔdRҽۻπg@ (*za$Tc/DJ>ZcRBSbI\[+ tQ\1Ѹo _^Ѳ !&\^VGD[ΨL:cR\Ƣ@6+mwER#Il*p9O݆'/6` ޒ5>@Bͪ6*$caqnkT%бw&g}ٌj*lpqW94!j^;ž|5RuƘ.⩴ Jb S&Z0'#/|ٓ ^ c!7 UI&KX+}R20ƭݲ9=8뗿ZLPb+w\hQbs- '{tDHэrhtҌc28Uhɾbrۧ4SH<ᓠBs@.程Nt^ G4*8)m'!%u yeV{@ie * $JV% XĹM<VɖAÊM}36kQ Vܛɵ?1Dp]j^='Oӟ:onߕn֘{DPߖd<aVjI&O],+)A4:KӀDؔ`"RZ&Co3r`"ar刞P@(tt.ftɥF!8i @03;d6"nCUxHBKڂц =e%h ` )9t*$JZxҥ@ u6JP|"SX@U22DV &G~E66a). 
y-;'ԦۼS_^ Y%u*߿iu`9CHJQ1I> (gcZDZ\vr~O>V_ d7 U,r&U柵rCtn?kٌ{fmy5|vH0NA&:K `*MP4">8w<\'P'DgQuQ&DL{!rY2CMi4ޥ>WPqx j:ݜ~繐11L" MB$m Ξ9+ZR{}.,p@n^Lj }|=I47IeUo:0ػM2־o1oƅu]g穱 O EMKM'n*9n,%&lcke =%“YبQF_:dhE̞b PћDk})NN1i z DR)588}0`ڐͤwhZqNPutJ$ 0H%RvFhP:%FeFM,k̰9'ԏǚ֪V@ rE$ 5+4hK0<9 t[E$I{N3^3`M9A,1(ґ_ƒZ^hc\z7W6Iwa!^Z6ofyK'Q+1Rl@yJi4nm`:GyGO<>9>ጴ+[UVUϪ EAs >z ۮ2/Zg6T;njyLsz zsë_P_?Jx9s"K:V_ՖmV뷃/>}7OR]ǿu9ʖ2_:qUQWX~'ӂǓAbUCg_~^ino޴FXi#|0X]Wzir:ɭ AU~jܗDjKycɑn"/hu䲏1/ ^ %`4#͒O1>0wP\*"S 'Jq[U?v0zyF[02lRI8P&ױ$ a %/t3]G٘pv󄝛.|\0w5z״W1LOävw<TdFi|Uؙ} {ϗ{z˽_0ysQ+t2>|~2fךQE)I;KlOyäk{僶AVճ٦k,^obY\ hu@r_[H|n.t{ɋn) 0rlm]=׋/C_S`z34 _`s~q6L n`"9ަQMKfc QkHM_gB ryG8Qf` hPq I(L4,mA vg+F|"}(@VZ&#ZUc i>eY"!ZY&Q| Eܪ"By8LY3 kTF(+1GL52ϼAq&0Q esq|Ld>9"${NwC^SvGSB]PY>IEPr@sIFJ죃̘ BYG#l"ØjT]QET‡OBr(&K7SʍjWCu+jUqkrf)iQF%K*%]mo#7+|=| 0869$\vA@H[g߷ji,[5TQST5_"CCe\fԠgk_uAwX[KAyn/,L Lxڳmi.Wy\5@6sj|&ss Q>,ۓ{(=91ƏחeN?),Rz1`,1'$ L Ȕ`"sA9;b$~8t3t"0 #O]d8ȝ?~=7/狛@<ƺn`eH~x$̍v ˡw >شAl,p5s끃3ji= ѤVZyZ+Oki,J/2єNj Qϝ{ "锨P%Ń'а 4|ŽT8LA:|&o}\6b~"NLK"Sy'M3||O`mZY6lƞ%ci8Pb&9MP'ubbtzDzhp|42'SZ'♔QLh ),ct1*!jc/( )KDФ.yg+*,YLNW@b]R@_W}Õ\Fk)PX8wR('ed8rF3gH Ա"XB|,Q ڲxCdC dID(tU* BiКKDkIQĺKw?'PajoU|ߝ58O яe~ܭi\SGRrLoA9kl2O)އxG9G>:Wο ^=٩NlƠ|'Dbh ˠ)M _gbT%m-:J!f6̚OIӞRRDL5'J\\0(93v҈:.Aji?rϝIN'~w?#ͅ@d()q8R&!R@:O$* 7Jp\WprǤTU _b`;x׶e@4ZpʑYAZ$51iD.jAk$nN3jO8KNPXaPL܃a ç5vEK>\ޅf҄vRML}] EC?6-]{տ҈\"hLw-N#ơOWjg`Z4.N,B9{$!֏#}9ݛnrEӿ6 Էz|y? a1zua3w6';? Q£Q=6zlۜi:Dۨ^=bA i"R͕@{JcUp)ꄟzCORƁގ,RȼHBe3nȘ%@GȗI3,ฉ2x`ԃٳ▰6˪(C:oڸKuq,KO뾿 GƜ*JJ9esR͐~ iTMr~1o~_,+#% >56DֈdMK \DB%9ҧɑTO/sU1 nɽ8(,§r̆ȝ׽׾L?vWew%1X=Q L)2d^V:.R XkGN9s0>Bg0)!LIPyOw|sÔo$oEヤ IIiDE-HZU7")@ 1L K@xpl j8.륷1 zKx* D3.Y >gᔕ Ռ4Kj)q%+H&^dwgג3֕}Q44 9fo0kSgW zk^.MbҺ9vS\B>&n27<~fnOv1O|_N,36Fz!R6pΟbb|b[E~˿gn 7B'>Ux&[uip34zN^vhN[ Q%'aΦ^HVe5cub,c$yf1"u-sB!GˎR mA#Sə$RL{hMwkm> oM83>{8z)aD3|'4|JCS*\6Q.JDZ^J쳠% 7|NZ&7 'kL&:8Uɓk)Ρ+`1/2%"qc( Dx0؈0&@0+xksbDVzq9o#̇@.$!-Es3'EOarsf jpxy;~spdhsr,C|h돫o ' oQvA|;{ҊR*. v6޵-#_QnIH $1퉘~yWImTo_ Dɼ5J]-KUB ȓHl0$6-uHFw u$Od;LJWG/'H&eTaWʣ)")$iQ[-UI>o|۰RG|Nhl-;\LSRD(,Қ8l{nub渫Q`Hѧ>Eܼ.mѻb*H/f_g嗫dTtWLH@s]A/E^Ƚ޽|;{G^KNNFQʎ/Ake*٨#  ׶Pl49 bYDJ`'XFw:d4f5cA$&jQ'ylןO󷶆Zn}>$ߚ^6gug[fo=s쾲K?rhӪcUWGh;;/m&s׫ +FV7ڷ˷Խ_1w^rx 7?1?5ɧrQ˃,=М?]SUӯZ=>Oϗ[͟ 3Z,pt@)`i}~"FY3b'B&Ig@14K{a}z}ǧʭ)-/سhxUKJ1먓F74ȗm?87HGRnh6tUR<̓ʓS{}in͑Y c=wľNL]jj=QĨf퓵G5$2^mLA0c.Ft(aXDT2HM2ZcKړgj#m|YbcWjLQՓ`<@J|A7U`+_ *cgRoGux<]癈}VJС).р -~ͱ ]4EEw<LYLK.c$Cx]B m?8-(?# 4ήGfB|ؓݞ#4W:{s1f)#)OV)e(ʐm"=Jk"QfREBd$IZۖֈ6Pdo/zlT2`:YJ7gruMYGq8bijL1W Ia,. /6oNk,Hz_^zɳN3=}JW :篯d4^.R>WJ{45[}zpk{4ޥ_ 2ٵ6}@0AtL^m#aJAV%Lu 0Z9GV>`vfoY?'vO\#f=;,n[ka]grqf! iZ t;w>|jY BѩCsǮx( ޵>Lm%K5\&PT%d*5ǯZw": d[Ѕ@Ծdy֘# i ȹr~9ǔ j'M2FΒ{$r/=srBۘ2 p&a (.f]X@Y4ԟRf f $V*It Z]Qhni=$;ɞ'n[}xqq*o_46؄Ƣ7h-$_eGZC1X7(ͳ7E"bM 8`ERR AԈ!JEdӓ#E6dbe#ɀQ@ 26gel4Fƾ6(kongfw,8K Z :u5xu9-v1Qi@2p*1d+Đba'PV#uNޘ3R*I nl*cPɸK0DhγVla8HKXvѱզVz{1[N " scL!#_6ٸ de"b' eMaa@2{ʖmMʂU1K,d|HE'Tg[=6g=9F5G[}-kl]o{q+V.j [KA&+NX#~1W*(d":Qg"b,dQjgB 0H12h&5ف"l l6JE]OC! 
VRbQ*,߸$Mboa6Cll\9 kXE,S\6%ُϔ6~~,lZaV9^Yts>LR,^w  6COqDH#O4GIGIGShl!EYu8y!$N&Aº[ ƞ<'7z.'J27d3/ۻ\H*8"/R.H?!+sV1#]YRa/|\ v\$g^7Sж?R)fZzLnǮτ]`c׹Vme xnJX,*fYCrE -mDq7K8}D`wSk"E0A5oTI# &mA89x%J24I QibgςICE <t6)O4DjdLš+*lgUKF=?ގbW*7Qu̹]Agk^o<,nӖi'j%HQȺ -dR VK@Kܖ`S174I1[Ŏw͝FSJ`& $h+':8!QI1Rb6ϝO8 _K܅VLV NUoNn5VdqR"lׁ{}ba?'`]u,c'pIt˷Sog :-K>.E26|}JN`к":T;1.]KmX?zu>K(!xDN\E]ko9+ 3wmvv.&ػ`f~[|ڞؒ#N[nɒ%j%$fdTx ~sVDw,z->c-Id}B77e>Zh/Xl;u(>uZO'%  w@xH6ڦ道I\+?.3; iÜ.eݛ&v vROYaL@z ,Se5Ϫ5bF '5:'p%p!?X:,(ޢd GɓnaýVJ74]REu͝w5[Լ~^PMQ%ׇCQPva1=;-`4DԻA:`i࿞)ugzjIOD"FCaJJ,1b;.L9}{l9>O1!r)w aD AH֘mHj#Y92GRwԫ j(hWeqsmWS5a<ލVеZޏZ׉'0#Hdw =Nt "# &Pih4a& :(u|肧간w\X$SGD3 L<<X%p7fyP*%VWV/ 4\IUH GHa`Y&>@@h=O|zN|m' +88FKDSl1f4kI \S05u i\9-`ܤq99F)kqmXD\9CTg#v(XV&DZ Bng1oͤ&,X2g79CCz(m]B/2^ʡn =nþF_T.2)#QQ%Hτĉ@%3zj0`OZjD &T_Ʒ"fh7&xp|+!?g5(/r\_ғ/}YLmY#]V`!7uԿiL[U?]O);4mqy=4n.S݇37g26U=DsVi,|FY4Iǚk-l |aqWEUd=F~a^mT/؆7!`#{my7 ISh.ˤh**fU%u+NL]4٨˾juMTU;5NOU\s{YEX@{R=/;w80$cQ*M<υbG8_ &/mwc\"ŕC|l'3f8[XLy߻c/Sjڳ9 VHԘ9;z5uΖ_KnphvvSp"ui{K:)~xOBTef1zaUE2oMU/;y+S~"GlIDjL .B༴Hm*-AF``+LmI >m"FI%8CUz:y6T#FZQ),J/@/VqgSo+dZ&Ӛ3sRGpBЕlY7p{–5>Mʏ&HO6RUip)wgsF}Qi_hk^٩T:ʃG/۩l k+ӑ3͝>7<~Q^]q쵲2aDD&pzjnDh:vFx&9G S 0- s ۄ=_I)XQbE{L%%vRL ^B T1^ kBMTߙ-{v-v"mv:tJdOud:8m0Xs@΀NE/a7P8X%'4z1KDcB)7j<[Ȗ""( -J)h.cn}6q6匆nYe4,kWwx`_d5܄a]cࣃ &%+%r{x$3G^Fa}0ƥ єqturOuzl_^F4!@gJI*hЗ&Z2adN$5t) EJǒ!8UX-Vc&yer1h45[!-e6q6k7XFǻ2Df*`:c8jKfoPᚸ: ^%3f]Su o^unTij/ҵsB>k+LB# ?)\LoхqD|*]֥3,Dio>_۷ծg5?q&h\Taﯨo|w ucߋe|b]Sq w󡨩VȄ8揋nqqy٦B|]v+SAӖ[g6l\ry * r|rm#*-9dV:KF׌[B/#]H3et #ݡDUmE"c͵W1ʹEGUG',Zi)ɼ4t4D{S(sVA-m=3{-3BsjFИH&&T^ dGwc7՞Ywjꓭ8QCԯ SY|9`X Sgӎtx@xU2kέHjpFYW9S95A[,-FRy/wn+ xv`xl|K b`i5wTX32r G Qc b\kATĞUٖM$\.%6!`BLB1jA0fG)V8k4GagM 7&ڴE+[M&%I] Kzr?pܝۛ9b6ݚTI 5+'}M#$w=QePza\^[C`QgH磗JMBBX8R0W%ۨrfҦB5X K2/Pbo5R8 Yd#Q&ɫYIH7Y1YM*$'M9af / ?l KNxHUL0YDD"! Y_:W ^V{w\$6@$A"/1 C|'nfPVg:܁sSA;_t{E>*ǹ`1X9,' %2DB/tJ%kHfz ʋ0z nx÷M!@5b+tuL +2ߜ <9,K0*bʃeb6 tk*Th(fN<;/[/ˈO0$Mf L )PV% j2:Ȱr) Ձ ӽA*20n۰4%az(RdKJY;S_[mnv9wnYX4J[,Lc6J4SB2u>&Vy>]>y-Ԧ-}K zL%li-/Zv0Ї!@ԙ'I_muȪ>}zyqRn"')f}K n û~vWf{WڱdG)҄̋J2ŋ-`;Ѐ<ֵq&x t҂s˂ٛ[4Ckyں;y-?p {jyf|7~Cwmxft? k{/(~|Dރѧ;O-O #طQ˃=sO$5 9d]LXhKF-wa'0Rvnc@i+ )Ss8,PbV],Z9%G#.o8,Nfٳ;Ccu̪%,hQ͒!8_U]f-OJzz ҫHU OW$DK(¤RNc -)`(3Ue@l#:` P;A L<~/Y_Q"A'v~u75!rsTx{J,sJ/G9⩨4<0Km*C`SeJ+gI1 ]+mہK[zFc?2X0qql'7kGb絞t`q6'3Yqcݘ 7*ֻN}޴ֺޠt7vqсJ/Bj棜-Z> |Ai"OTH8 S ێҬGG6P*oTJI-Jİ4*UI=HD ]r,A'Jٲ5(=,!=PqbrΝ%~V1͐|_ϫL)Qg!1aPAzA!xf"tHu7mWb4~ȚMN3bgocpD`G +Ď& 2;u¦aIZ0$@]7) %|D SqmWWI\-E]Rⶫ+RQթoP]I0;&uKF]{+UV+RO)N]};JaF9="u3|<*ˎLJvuRrߤfbg9joo4V0 ݏz,J5XktRֻx8:/j76hό'1q̖%:Uip)їsOn&4`I7OeWg_OyHݏtu_2{$΋=Q!'g' 8i&Cʪ$A P! 
JPmR5NQ5Z2 A6yoU?Ooi:@]gl}R%7S.9U`BOۿ,n/Q"m@~ZTkgzȱW,yClyd-${f9R,wKU-5*blK]/Ky}]cd9{(fJ|k#E Jk)+,&پ@i10g4h`ә,\Wٺj4n8Z 8ކ;YFf]jU/nǦ}3T>j3]O Yq,|t7?N]O׻o$@ݗQ$nB8R5o\\_˿?hNā1?*;tUKL15R4-b8$gɘ9g_8gwkCN'8DK aE (0c1}ō>|(J -ŭP73f띹#*K[/#FBMUu.C{鎻DA}ܫ E -`e嫸쬷U -/_h)ꠅWr].+ĮF?7Ͻ~+-8o귏}O/?3X?oXyZI]o_ĥ6Pgiˋj0h}[\u V_ۃJSST)EtvX?blчx[`0*~; jϛB6#9-D_"pmJQ2}Pސ;رDyu:E a~[ [+}iZ+FӊX]J>q_EӎP8B1\])7J)ì) E Jqm1RZug]MPW1;Jqɗ+=Kc(u5A]I ҕW(ҕ2+ERH̺tq̫Ώ9TW+ŵJi4w+DXPWtIbƮP$u_RJP1RrZWJ>w])%YWߍx`aoqc҉ ̭u#tųz)bv@2ӥ^oM\zs6&%4/me3TЂ9yA7Ԩ=GQ&LScT2ֺֻvKEkNMlEs=C&Zп@wa Q8b(\r(ù2>4LoYx.xf!z`wl& gi&z(pYP(s;~nXqD ҕ{Jq}1RrוPu5E]I38wpA}`ao皹uκ|$,qѕ JisוRu5E]Q 1-ys97RtEɾ30ς$clq)x8dh^Tƹ3cs9[ځc3ٹUnsh(.r)}=hJse} <%|`A~UD/ndD[[(Fӊb)VZ^Jp5}+n 1C)RZuV f]MPWCJc1R`JѕrJkܬ >s,p,u֗+E2%|^KWD&ĒƮm9ӠAi]WJIuӠוS9A=$Ѵ1+d߈NQWLq,HWΠR1+"J */f]iCcWJہlU麂Yg&xFn?5ןx}-38ҙƮ``Y8O鄽&aEc'5*|!xXPm ԶVh=PR0׶m-fd' s+gP\E;Hw1iZWT/ђ]Jn5V1˺m1h]9וR&I̺25npbt%RtrוRܺ.͛ cKD/^?+m~9,,3zFFʅ {Dz8"F>dz?ﳦ|&3@5Ihߑtoﯟ:˄!Hi?,[/t^6Yܵ@䡱PZAM#N@5$+w^^oo#Y:}hoڟ/nr?iNc)ɐY^|lp1ԺԲ&b][UL&T0UGkٓe^ VB֙&VB31ևTUg;'6]0 zKz୓Dz0~;Gu`h;H@(yn#& ':SAcԒ@NZ 21iCۦlbmH.%3" `%M U)}EV- ÇOI+y`6 w|00t1Jris y)̒|MQjuM%-:6-ǮR2@h`ƈvS_ϣlI&H@XYcj HI.2hDBUuUb#%HA\խd*Ru:v!IFn; PA $ Egh")2I68`k$]MDֱ(ĒJ0:3PcH-Q͝`ڪCMS19JUR'h$M"H$E5Ps <ԥ{ :HmIZIʸTR[4MjiAa m#rI 0u.X^bpLNxKXםoqHc md10MhkvڀJxLXURշtՕ2wBbz?`CֆFuU)<&EqԈbZ~uu.|]@e gU d/XdU%; 7CA 5q2Fͷ6Qc @ m["33f%lCȝjQ H ̚` UT CfұB8;*Vj M e*B@Q"Ł.0͑EUP=kyGD)J6 e*85- R+"{/^Z#. R˚KEӘym -8X)D$7%^h8(:&Y,"1۽@VQ=r˨ZQЅZ}pEФBufpOA^[ĥ("D$' GcEe( / Us7`Ć&Ohf.'2ğ׵VtڕYr_w :$M"X 3  b:PT8xi?olJ?n:@G+t9҅AhPU!30阂'8$;ŗ9*X-96AJ$ڌ"9d^1P>xmBLtq?XcGSQ&h}V%H Vgx$ !38hG_6Q,TGW>AՈXżۊjlPl$Sa)F/)X?wy2WmK&XZ> ]{ #BK|u)}j=Q ѡB- $pZu $$5(ڽ,C)y,a[}Jh vscAX A%DEHmhw Q5MH#vuCJߵƢBD tdiB\=vEQLIF,FirL  "Q8TDYUPբ-`QypTB+; DFt)X_NXjͷ]6@gѝ4MU#OԀ,'޲(m@IKފ6 2俣%0lըϻ%e SLЃ![j&1ڹ;y6sIS߀giin9Qe,l'!ԭGwnJ''f= k`= ΝDn-,Fݚg)DP'%. ĘG2&jhGK*j7LJȀ5Cےb[Qer#bns"NJd7fdd*2(e`ʱE =>A(Az[ZT}zT [bc-X@|+BqҔ*SurUp- wHG|F F-Lj RTe itabY;p1-й'GBF*qݢƀVm:cWjҢ:jփ*UҵP9HUI21sEm9kZP?u߳NKރ ]o9AMkA ܭFb?ڢ>u" (AF6K(Is!??m)AH-AQ%#pr [/gg[[ƈPd>z2[+:<Ґzf-|N)-]:rί_:K4R1zPGoi1}3_pr|q!?G#j :_(W/XҙV^Ť{.q_!ۿ m7ѫ4}[gQTc+Mԡo;Q{J hs@+4սɧWɔ]~GKDMsR3stZ8F _3*vhr>כu5t\^>hV7D?57q]֠|;4ϟGKQ}' # g96U1YĦml:E7;j} g.h׀-ce[P-ZmN=J1f[%ܑ;~ڬ铯u?n}M(7 1;}sʹn!zTHرՄO=?zׂޜbpnUVoѴJ0e26*q4`~4U~HpX="v0tEhɫJB4#+=7(v(tNS+Bi%#+kC*8+kQW@b8uJ#+gC* 7+ytE(bzt3<{w+m8bPFVWAb0t ]_. Qzt-yEDWe0tEpS = GIWKq3 ,'zm|FAd@__j5_O^k(}Zo`lhOg_*BǣBn-kF]:zQsUbhZe' &4gK?n5,VQW].A'`˫)B9:FBWtijs]m~}=@ƣy?5F4Gm&Z.j&^U6zmzSEg>Z*4rho޿m̽磿{\ur'*8t7= c)aoH^~'oQZ|qahwJ5uLxg;ޛkǚjvDgwf8ܑ̹_Z EKF0Z ԜxfX{RNN2I?w+x_4K/һ٦Y-.g˄߸]nyS4cv "?y,"cVm:EIDoMCPH_jAv;y1wpI pzx89nG^c'uۘ]SX`JV8cAՃQCQC=:kGX(iQ'_T#iϥX^v6j1 &[Eۡ4 iPFH-0"ùMhO7P᙮!]/"9 nC+BO^]ҕEb0tEpc ]Y?u";gzԡV2e~/ r>E'5SGL هg(]hRצ*Y0}L0}u+ Ѡ^-=RX=}|> ѓfm;_b1I-e2IPPT2-Uw*EylU+H&3%V*m:ZDgm;cgLeY2rk#WESʼɢ<y  ^ҌvF)3vLI#e[9nVŪytHeIAɨ&b@U|%4@*1ȮE"gJLQILb̛RPt_\2`b]gxMrg ѵHף8yy|^6L6mu7}ӭh–}6ftz|w?듟G8w{Eo҉؀[pzb\I*zv=?@6u4grK-b]|bpDk-x},[vCc~<z.:?"| y<-/-m[ut5g (:s qs@s\JP5u &%aeRU46\ڻw'I;;ψ Y"}2(TsP2k]\.yk%1(JC:FLT*RpU%ml!`K]lfIz7q]>'N#wKv?w~ZWQ.ސsT_`jMmEi bBxU4SIM:!~)$D% 0EؖA톯O 䙃{RpghZicj9-戵-l6^a0(+fPTԦ"N2%ƪ tśҥl,j뽊D-$ RD~XrzRAdx8G. 
΅]9)hm#T: \f'-ysy8KqҶS-:߫ *-k0pb.6Wj2Lf$o7H ).澄&D @239J1`PJ맪IR)X/x%UjM 7,AYIYL`2Gި\&*5zPPV;M[O7 J΄o'¿ b/a Vg1"j !1um` .fkw* }6Q>ة)n-{x{7یc-NO#ZB`։|AY mIq,HR$}{GrPl0(%HbKq#F6*Req!mv&R,cg &a ފ~b-#Qû:jkH/vN1ώW 򥘥ZޚXbS Cb^,~4K|![kEoz1ϛ2D*i+ʽXVJ}VK  rfa}z[GKU($si( hB=wppg 4ڣ=+ir|lm%F9v殘`8Gy9b-,2>Jb\Em!t?ì B '*4EIH\1R!U cL1RZ29*nHO¤kS:)=Xtʖ>Qϛ++[>H념 XdHv:%ʚ!1h@`*Ue+ڨ) "DV :yuTW[z X+$DCW3&E&XkU)A*gMM-ꃵq?='m|ÌZ_/ے1yq\ⱭC#fsj5CcCx3+\5S/=jZIDeH"FR0*_4vbwጪT)IPBMMmު%ʤ-%t !ze+@Dah]N5n—WT,G{J4dyiWaP՘hEkIC#J*ld|To:郖%Xjd+ UB,kc -{wD/q+*]Mf Ki96NNLil4Sθ_9߿^USjo^pCt*֪sd,ޘTw))J&ݼж6RSK5J܊RC҅ UEO)gR!kcoMÖVf Sml ` nTؐ,sGƞ>L"oß7f=Ů.JJ3qY-XTmQiV'9mDZ^J-Fߔ"h*chd/- &4gA:_I UC 2{R8-v<mtjjv/g9 Ift1Kt:$\EGbnCVM1' !32T,:lk IVEҘY 6T-23&{o&a{8#˼0 5>NE E,ޔ"Rl;!dRAHV` SjQJ}ˏTR2dnLEye]I%ebS,"LZ7+-w^<=Nlb\6JN`E]Y(P$YSQ*Tk-!z Cӟ<)@u׳D˼.7ksB?|s.hbdjFDZ"S>t6dU RY )n8ma T(zy7K>g3U_:E*u˿[B5(i'iXj`V怛y婩j2#%%I:ԧdqҟ7;I'eqJQ^(3{ژ8~Z2fLydHUd*9RLrږd` TP|M΅ZN]1t) DJkY x~p` )bd%WHyKT-AK69XAQ{d9}$f<.㏓+ᕊݗhxfjE՛e'UGm q% 5Yj_2".f$-%" dP1O|VdѴeZdVgbe9E*9WʳFPdE ט+pYYWq6x^шfP e=f+uR)+XMTo1M.Ia]2Z0J\cD?}a?tT]|qj ZV]"`Yw9iRȣoH}锠!z:΂ E֢$~}ZYGFk63qma1]먏 }+oC%?B/K]7ef^8j Yx4!&\֨).f^}O߻gf~:XfcyxF»yjQY|~ \*dB335R_+Q&_n %8?M;z|[gwD$U keZ\T뢶r1t}`wQ].=a=Q9ˤ`z0w_}kӿy7ߝRfN_￁]OW)`ƅ~^#oj)ŒqˮΏ׿3~>M-'K}8onǸ?0.f BUĢñ˫U57DՒl4X^&/Z*oG9\ĒB ;,}PR:/c<)#=I$'&Ct;birIEv& 94s`)::d*ԲIBIGᔉYG 5^D i:;Iv)T[k|7w6Sɹ\Mi Τ4pzR'Ne5ޒ-`T)O|~Qy0cbnEƓ9KJ&E&UD,RB 1:[F #\qAp(|jCѶ%<@[:/mqW=,^atԕBZgBfψqQ(XPkᒏQfAܩD#BWɷ5Nw.u "FU޸8H3j-+]DUz221u~_E% f|QJJOj8 kGowWIpUY37Jp+2GËY|޳9g0T"7I4wC*޸4FmUdF-ƥA-*KC~(^%LaJ0 ^Y(vt|}|hxe ^h4r?\#x/vsb$rTX\zm6|*sJ$3efw-NQbCDѾ7:,OoU"41e2ROOT4 "V;U bVc怲>)X Dq(WD*1=q`(cUp9 OSzCϭe; q,̗g)-\;Rjb̤$2F"^"f͢gMGF}5)n 5\YQ[kFLbSr⹰\UciN|Oj=! 5|}x=gѥʲ-HȲ>6$VlM˅ jS9>һk>$=heǗRsW#/9Sګq}n\~w;./jmh\(GuN.&eJ-L 7J5af^)xPL<~I>k>vYBifr-'@RT `%ӔVS[R6EHnDWؚUT4-u+D)[:DR·Ь?]`͛sB1  Qҕ&h]` EWwGWrt3]!JeZ:@2p$ݕ^1tpucA@+;]!J[:@1MDWSB nbu+DiZ՗CWjɡWDŽ j%=jpŞOW]R tZZ{sC}sH9읱D?}fSz&%;Az`=5=w6zL;U{,mvܘe7ijQR-D貟WhgbtU,Rh` yN^2ܟZ_py#6s *@/6c`p`hdvd- ۨ% [ 'Z"ÜL '7dElyA^=i:xPQ Qjp}Z%. J[eFUfC?g!>41]Wj*jθ4,ɀ'@EE_޿z &F> NYP P{B Tk -bnA`MilichJ"Bӈ^(yJ:D $B5n ]!Z[S%Ĵtut%Ԓ6a͹C42u+DidKWHWYaDWX B4"t([a Ɲ4SKCWlOWHWK.H kB1Vt(á+ 3At97++lS *YwBs;VKWCWVPUkCjGt(oꋡ+c"rt`Z_Z -d?tJFEWz-]=FAg >ԍQ1H/DB,~Pg+^'1n3g0"Q -L2: eonK13m`Mmum&n{-3 a?aX2U8(+[X$K44-M MsɸU +<tpn ]ZIx QTyt%06+LS њ+)M!gDFJ$V9+US 6fsBu˂ճЕ2&9+chwiۜ-] ]iK +m)!1tpYcTV^w(%mh*T;0yK7㴄%#xru{F]xؽq֪fx3(nr Ut) cJx[YaexJi>t1 4chrPQW6XH> e竷?'JLmhaC# e dͬ}M|\BΌnl4ND*"!Ӑdk(Q7/t!LS>| QgB /O ar;eɇ @%<؆4*Cfq]+&O _hA|r݋)&m a,,U0hwJ_/ےHu?];cGW~T=0|~XNOjy7({Lq'e^g]WVub^2ގN8;>߳:ޭp><6gmPVosf¨W O]u)XG9VRN1䨈33>2[")g8{Qq>I-81tᲸǤg|wmmH<'HV,X̼`k"y$ىţĺDm99HP<[UEm|sŘ#* mbLT" cQ!8ˢ,j[־8u^qY-oLݖ^wwsˏf"Vegz33J`/ľn>ޜc2ZӣlQ& 6Tb`uvuǸooRw75}m3DZ~jc~u]Fz)ׯZ춫=Y}}^ݏ$~q_{OT_֡&<<߂1S-$rOszw$яFr2p7)SݛKyVԺQMeUŻ_y zO*FJ2RjEl6nU|wz㒿-[--on¾{zYgw)S)1kT^駊鵘~֪3Oi!!xS_ԗo 0:&RR:(6Dl6 =Tl΂ TbNgT31%X!#H1&ʟhʆ]>gk} ()@=Y"R"W$sr2zҞL.(ۘ 70 B7_s-`S x2{W mDRӷtsW} mWJ h]&_=3$wקwI=)3gtcZ3sڜAlNsFdD)@Q0, tj%F/YE&Ȓbj7 QT9sJqǁ, 6٦#Yf3q<Zk3KrKE*vr5sq_H[Dz $s VdS :.&ǚڙ4TI R'c[Bf KJrH4 am)f&K,^ [6{H?]M<,sZ,EA dEe  B*BXVRbywn#oX4ϵhޘp(O'?";F|uIFcE]֨!ge%}W)xן!_OG.q:ӏWc*|$4 2yv8Xoףl6+[p%+P-q3a s_ZqO÷ " >ts2\^ϯ ]|_^܌ g9]b.qw*`@}Hb-1ͲT4j{T{j-jz`]i z ֐?dz^'۬󌡹沅3"N!]Au`@zɞGr>e}X^]L4dŊ$C("Z@T*,=yӧ|)D&mx7y}D˞Lf Zm=zx A J)|]`Ru|xYM?6Ado%.:P.t>EӅeR҃iH)08ה*@H)ju1JS% ֘Ѣ+ޔdTP :Y #0RE+PHEVyP٣`Ab3q<9D8pwӫۥ6olŎ;crQf#9'D[ZA;S %, qM[m`W`ҥ$0YE($Y7`d3|MS=zV*Y"CR&ZyQ(|L+AY.?W}sEn^10bDF? 
v;c[8(hM(y@0`T|}&l_ٲIzdmMGFUdI3Jj8NSd~/,_[=v)7z9|\-=pݨwp<^avcy!cq@G/ k^Qf*0WYŕ\[sūχ?Qfq"kCjky?[4Caeo̐lr/Bf~E.9`ѥt!HG(4=_eZ)sS.'p1%p$D$%2Vx`VŃT3qs9։y0u2H;jj$GkԹɵ))L}bLkc y>AGXmNFT,f&9'YZ[xG*8,6wI6˲5~ȁ3A<=f ne997FHOyhz2+fӛ*u ء %}`7 :d@e%.@]QF*1jwN_۴I!g ML8kQؤ'u).1Q 5M':]iH۝3}2{O݆2w}^|S1n.mPoԣz'$ӧu~6III]_MHhs[ ѵp&r(F:P3Z:WDPƤR@GoZ4 _3ߝCeN ~KFݎ񘄖M6)6iUզ.ZS\an'@/":l85v<lvAkv/g+ADd `-hsB>-7.y7ՇAfȌ %0:;5) ^Yad1(TLIuvufև=_Ux68U#RcHF4[dhdH^8AˬlQ+OjPE)%oP ,q6,NTz&2<"b̤8흄XZJ#/3u`8떋lfɉzt[bЋ^71J"ÎuHR OhLVo))edM'AB/>l6;NՇXx*lp]<ȃWq ӳnll~D}jT}7\8G~M%nE'_8 {9aa3D[߹LPE%D<ҁHYz$S;YNN5eTX?o~5̪[y3ǂyݟ㘻-Z9ӥS_⿝m-lE]5pPl㥕|'%JJߑ򕆔' ߱^o-fcTc֝9t m5Dp)m8 $ԆcDĶcfNS;엧]4ގT2J"$Y zG xdH\ R)NTAzNR"ipqRcd2HD"vh&Αf6=#"*\GVIb[ w6~_QQ\òi7uG`DhH3: JSu룑"&Hd`ꭋK<[1|ckSWzOP6?{Ƒ@lr vcxH,+W=3|#QP"[4k~]U]GDu0TDGfEwZI%pD0A$ۄvc`㞇^(muYWXX"6DR+KEZf!ퟜ/TGJ%{^H#Hh-'D2J ASRI/ilYFZ:IA6q&1F2ZX>n5"VP rrTNz=/};~cbmH=)p߂Nחf"캗pFYӄ$.V_kܗ(1ʸq? ʟS_$/ -#"9M}Cd&#; dE.:e0vq0#0~Q0iIuo=ٹy9E?%F{W :L|ai`v,C;:N_:q:W駏goO߼{?;{&g߽S/0RZ$:K&Njtqs=3tvP܆K)Iqb+@?lZfnZ&+sgB~lMjZrje\NoY3AUdAV=lXn5,Hj3X=,xit1<'"֐u!/[5:A ThRq831 # SUkh8=1/i*<@9I >]EI%8CUz:Y T#k#(oAK;|)77LLYJ Ӡ\XÙ_(?˂F},Ь.|p>0ۗɭz6#'lF-ۥv,*nFIFeg( #g۝FKU% ]p$,g8j:/׷-,xr'j,óiB8 ln^YaV|81Y0^x ]z|azPV-CŌ vcvۿLHk'UN9KA. Κ]hr4?WEqx.yR׸\0!~1 ~_?QQ~HT(bR#XO{佼O0 K, L'juNTr i`\%r9WZ*w*_"\ # \%rJrpW3"LHWQ*Q9cYlB`Gpro*+FL*pԭtJaM>)`?per_*Q+vF4Q)[%•&}RD.J򝗮 +gn"O&SEeWf0L薋iUbGRkn>#rm"/7|Yv}84͏w~z3YǡE 7'+FŚ鳽r"#@r Ghゟ'.XTqr(r+˜RQl@X̵Q*g Nɭ`)$ŤU,(Ds3 q6R6~'l4I' D9 &ng$݉; #A3SB՞@1$=b+t@HA2d~sVk/@1[ lòWc䬞dr@E|T)cjĉy")i9 V"ȴյ 8HI BQp$L_>L3$_OgU N!1P/JGjEC,DvH V۟DަPj6y/ Vp9-g|7Ϥ(7.0W2`,Nfqa0XdQu[n,oC%Emƈ,OU/wqB'L樁U(ÈM! V=ɪoު ^6K OXQ H#VwSBPP.6߶V6F*j1)b VWS( mbLR"I,OE!:_9QZfN>WWVxyӿ ~&/@}> .Uj[gWr,r׼  J At:KӁ!ڕh-ϷI w}DR$K{b0O Ry_a%Ⱥ 10Eok.'ML`bM#DG/QFsR*"%X0APİ8w*ŽT=2FY {fK>Ȣu4`1S J2E(]b&m]%EiYJY@Ueb d"@VLE9S렝Vܮtxҳx̟q<'z|*6'V5+]W'gdz6(zl\ \I}Uzho|K1q~X:9)xcd 6<{$\EZD-C%qT3VG) S0LۑWiFƮXcAp#Q}ie[V|'.On~lVw.N&|#vIbé$B`XJ5]&Xf%:c2uIPuɷ]wC5bVB3mҪ¦.Y@ ٩]5E( A,x֫Ĺq6L}AfcWQ=0mʣH iP!V-%(fcF2IGS<,LbBo$P[Ic$XIb((>1Eb\l8aoԯc*0Mj|슈1"D1,!CNhI&t*MG#D)TSD ^ V$dXP:;P fL5O>%80AcDl&y?52.λr퓯(AE.xӈI| IRc2H,%9!(6dd qŇ}fcWb !aw~9gKU^qJ;T Rbr2jۡԁO*NzXt$ ~!B= {3 aPHĪˆc͚rY 2CMh4!WRvרߎBdu=_yy )Jc>LbGyiE$m ΁>oMH2K R590 n7oK29Tu鄵+`lvvlWn`U[nYv*Ut"ݹHDh1(jKoSҊn{P+QaӪ0HofvS[<^@eoIָ=Re9Qm( ZJq rRa1i9R"kpqч}!IxCdAK6ĹaA=µȵJ2fyӓ-+ۛxf. 
r9)O:GiK r(\Cd SA$D*c cl+:fKǍ9\kKP|wR:dvp/T}ܾg_@bAY铆tJ$ ?YZ'Sf@QYoQWB,a?/XSӝT%E)P6oHP2uٚ}M@΋w2(R[E%(FwN3A1& /ȟOIcI ` vKQ9,F]V7/Kqf!@}mp:OǓt3(^NV% 1?m*8&g0cߓ 3ߥ^W}׉%v4}|~><HAu%#88tVkAN w4 ooEUR`& T^#nplg!fUMI4],*כ댧<)TO8`%]4ŻK':letk{-P,߿ 犣#9ZVwGguhyyu?{7o3/ߝ.V^\$gX7¼2-U~ a6Qn:vB Ķ.|ͫ-wۚQ[1Rnm@yJi:1{bgٺmߍ7vkJ%]5Ba꼺iaI_tUFTlՇ.Goy1Qp}߿|ݛ7T͋|xO [Ep᷻ pMڛ:Aܾk~=wx:rW794qUi-,?wU}im2u_lu6[]څ9]!3؍lB}JmmL$[\QY\ H(0N3,IĿb:M' ť"r;kpz>9Pܗ({$siˡ8ۯ]NG|/Nt˟>q>[G{lX{=: _Pzֽ6yL5Ul.tT٘ }CEw%XL.b0 )zߺK  ڸpV9]=䔴^%R )Qݟtlt섖qCLgеoz*a[EAgZ0}jy4>vHC'PA ct5%E,9)l}{4oH|~8Nm5ځs׎o u[&$ƨlfD6h$KB(uNPs-`PL*ǭS|\]#^|)N'xdVmv:{?Qe.(aw <nWG¼%b^{P!4s29iuC6}[~[_h{}gbZV igZU)zhJ6`]2I($:4LБXWź>X_q_ڞt)Hom}X+o쯲HZ~1ޜ{xf6iNٟCp?K1RD'bIJe2ɐ!˚Y+\#ˊr Pj0`PleQ$r2p YBQ$VY [̙ɧBmquU+n~̪6ljKD1)(4 ocIBKSTRŒ)) ]&MQ'Xa63;YzBπ<_@kD&QT0d$Y$MQ2AD.cM PI*_`R%1px|aO_jUW}>g--rڊZ*(͘$R(!y c(R 6:JIaE`L:1ؗH+B?>f֪dI;YGwmH_aSrFUr\%uw{ͧ˅$["[_cfcr(ı-s@Lh4ݿFq"Ֆ1icFI7z@_65صˌxY2~rd3=_dW'=.Dl[1s{m^7^0k%{Dh"Z_[y4T8wBlZ}ͺ6-<\˝S 5ĦЭ!rÅ]ʈ񦎨&5{a6B?bv<,Ib/_FD~3;ZJ@^j| K>ս$W5OszDUs|?ʌuЯo;qR3@˕σEC3eL^PaFE2Ttw BOEfsv[ ڄZS>ǟǞ w)e%xC\,z?z w3 T,:_?g7#U?q?I5!mI%jjs3)CY*~|hUym8qN[rG|x]#$Ln+Xn!d<3>9"QlQsޏT쀚}ikJ<;ȂT2(;ĻSКkRK)qM}}"eBC#[:IxoeӌS8^">'nSܣ@<-3F$E o EqޱY6Z[&$߿)M aV{3lm޺,"{Do-/z`]|3_f"pHw}M5</@Oxy-E;Ӯ$Zc1 Igrһ )gѺ#M p V^@"oYEv0ۢ^xqd9ߎ#b(eǀs\H5'7IY;7g5.V_ض׏z~<C`~~4%uA4lJqTQ3/aWǛΰFݘ:eKc/oĦgg\J%‹b2ަ8t{\ݴl3EQR\Xd$FKQxfe4LXy#3-Oˣ?GbS#UǦi1 ,Ra|fdpByE*Ҡ[Gj4ƒqLMku\+1=>lH8[q`Wg?/χz4$1a-2i>WmrQlxa]| 8DžTh#aMJxH9&=Eۑ7:M *8-6x$9sv>q:K^J]N㷤E9 SUhC & haeHHʅBPJ -Ȥ3Y+N>3v,H}hi6i0Fi%%XJ7#*KQĀjb NP*D,N@Fk5)jF5r|BH|+de8٤ձ ;;OWmCĭ؝%[>bZ;O]Ow8;WJZ^+6+Bc>5}jOcsjbz鍳(6z.vmz #ݓxT$(&@Kɳn5+G5Xmn7l$LG#uJo{:`饤use.7=(*^>F*R CᬁU- lm%'nEQmQyBƲ vlz0{w1H5CNIMZ, jD9%051RHB 0,ʹ*D45*.hCS0dqk4ÞIkW1).%fNp;3}k+)G2BqMPTrÓ>i5 @@yPַ?z(qo RR`@R'M\ZGKvGk|)WIef9 @8DV48ÒVޠEqIQ ?9XYYFh>2`ռMsڠBOF/V(/uB0<o.(bd*YH"ģNkE E2z~FddOd!E"So_`f(tg{gG-<;`7]`.(f#gcesRuKʘ>;=|Ls #ur45'0D<Ѥpx"3;c"x£etT coNݤU,qzݑ?s?̼jzc7v\E:B,׽qK9]=7-}6?ԋL=+|mI' Yt2 LM1ԂRV-%j1ݻ(TgRLRmԊ„mPP0h SE̻T'cE"vIs.H](PJJYa]<1pieZ2 T8l~8scc IJST%O5sNo$ ej|5rvPBK%$N|M% n\sjߕr,*&|@smVG_0rU ]!="H_^FuG4lmwdAYAt7B(_DS9*uFQH0Sl3ǎ#n/9SkةG=#.;zhV9#qF\er8qUҨN\}JTL92kqɕ\UVSWJ\ߣIA3WHalU&W`VSWWK` 2.NB h_5h*ǹ/7լ^{RLg A9+L]PFQJsry:%{8`Ȝ=\ॺw8 -D:\a}J ɉ7xpD*">Qei)7QyTN<:b9Z2M觑_`0?oJoAJ=Fƽ:0|n5nː5|~h0?q(V}ǯW nXO FG dr8]2 Ni̢r66~#.W?($8sX/͵QT PAd")!Ȩ9e]V!i {2nY3riY*z)$Z25)̈́ dVSg4/i<H'9dP[jHM+>=q:^@|ruF]j-gg~7]d09VҢ\x}M1\D@*Gqf E S4+)Gϭ~$1EpMՆ'8jړM\1bc;! 
(J%'l a瘰Qb@T9P$ PJsT)%N[L 8cR2Ltp-6>%##H%+9{8Ay,ly{#IJK9⟂Kd8<#* i9vz*$KTYuuzi taMU<[QWv E!0(]ʚlͻj/nqB.|H0DŽI%Gd& !MR $4*i>>ߠi5t@CqSuUD@Taa:h O/ 2Z).f0% zڪVŐѾ̙d19srΜ8#E{R F,&Cfz Յc @X|DHl։8 dnGK\qNA0wo*3 tc~+S ,'N`L1{B  ܐ-2OTk,! !T"ZاQ"dD+;vjH]'}O[?&8 z}1.Q8R\)WpykTiWtP4*=?k! 8g_I1POyv =M`_3޳8z$goheqP|n\T<Qrh< . l`%Гs4f(10:j.(F^zn#FG^ 40!yA4P"B2%Y`"!e>HvS` ߟjs̨jOʌP#8ZhBmWyӶZmwb1fޮd6opgQڃِP{OE%,# #*3d3V@ ])"4ᶅt8\HwCVN ܀ P AͥuکEȁ/V &PF8~BJ"rXZ lX|1q%QkOӕ ~*e|Gԭ<pAYty9èP{3*xwGơ8Af5>Kٞ\E㇃"HYEU$$:g㈢ĐAγq\W@<<HǛ]1-Poږn݋MWCʑۖTb$XrJȲ8)gyΥL`q;g#RRիПֈaVUʕa,o`8=bq@q[%JJW3yϧÁudC~|e7x\vqM-^`'qx3AeAH(ù*b.+E/,&aCrRNk&LC)Ker=W FJq$vC3&aGr4j_1ٝ i0KykAU A몾"p|8I E2E9a rTJmaȉD;7@DsP[ y8_2=3F{ UvHZT*[pM(60G˹ BA?d; Jr}NFB' !!qDۛG0ul+4s鼜.[L6xsӍHUoXD'4P@rH"vRɭ%"rIьNnH%>=A fĹ7:IkoxZ{Sڕ엜%*g87NI 8L rvql1G&È#exK@q/k`:rM^Uтr")tl,GufV)xoAL1YkLۢP6G *'6#rx["J=@x,f@x`3 0:KSfqp\!FV+T&ScZ.{0*suF͵h)C]t} R,OFOPA뇦Inaݻ4XAP9ydtz _^\DɘEA|'^y1ϻ.1W u,$5s]vIߡiIMi>]n).v%,zK8J6Wm0{3U D,Qٺ?5ISzUo=z+ ݣA_ӱFe0lmWz+onr+x4݅`yuNVj4u'l~}mRZd*`@zQ+_uB֯"]Qk$nY}d*~-<=6CS^M^Rd?83y;4~ۄ:ٹyh[9ΓF';=do}ҲN9-^Tˡ -%k}I WvJk2K2ݰǭ/&tR>ĹJ>3":_p ;XwTD1@BS(  !S1yqei󸇬%ZKqQ>\'}kQ6%q^:f~>R|ĥ*tLh&h&j9L.}l;훍L'&] w,x.cLhtiQnOU˳ڝY q!;tm~Ow.8뇡juz΄׼ #Yy㨭S\MV91==k|AbjRĺj pQk,!B#j<xn3l*xaٻmo*9 03Cg^|nI@X hr7~B>z Ny7Kt:9$aT7*F2SI5S<*ƓU9QζYL%Z;OPO$mjsޟ(űlhY#2Dm"pf)-4H(|Zɉ޼`Yr*dUNV6?d˥OY)$c$իS_8дPh{XafJ &uvcՒD# v4)6VhHB(#I[)Nia1M~RB7-78XEì(QrD'NmD+)_8PW0-MAo(>ii[[8 CQ|Rm7W! QS>$)R+ _y_䩉{.NEƖx+KQi\}|`w?l/5VtXCWg)N? Bwf:fE6;y?ms"Nf ΐ䓙ٻw|bwJ_o@KtB_ 5b"L? 3΁uX\}]A=igA ?>n<.즊/N ŕ6!%Ɉ 28,ŘRd:CB1 p2Dv$#&S%2Le|iz@ɇrgLl_`<W@Y9f2syh#OK-?:遝rⱔ qcIf  oC;L@ zqA=:rB`Ot=4+lNahP*mA ǿ7_ah<?Y7LSu[O|1]KWK[mu1G6Gk*C4fH'5&(h3k7koJb =e`oX< [xGڼxN6_0J ?# J3#Y#IE!3&a\"/aq:8̸ƚNiV,P6T.kӊTY+h\`'S8ZI| o 7>:䄢c)0+m׼!_jKl X\AjA{ EsjLIrk2Z JY r )++unZPQ"Pw!JG{tGi4VUqUg^~ XTDM4SD8c9Bj)sa9ΐa1Kj-5XzM/X|%WHwjcp 7%cʞ7am݄*9j a)lYI). E kp!j-M9M EZP,q?m/z} YKlVC}RYtG,J&Ev擘&v XWɿƳov8a/2͡yzwon,;|枠K|'6e*Jrf7?;`iY??+ZZ*M<2PM}1מbexfQҐ/\Ect #{,UیpwAĎQgw;}2uE5!_FýGx DubǨҊ`whBCw&4 W1E_?k .sxZ>)FmIXYW\ 0isgI!iJpA |B.ք*^1tAY aњ"SXssJ"%99XELK1"fQ]_149 An;F@pmdJQw <4!_F?I6w7U)q-!;Fc]j{w&4twkBCp)%{87v7"*by:ciEjJ|ۻ[4[:EXbmWW#J.Rϐ7*Yx$nyC<cxs+P1og*k2BW ,4u9J$eVT#)`aٻ8nz6@^؈7 -GWF# řԣMvđcͪ_Ha,ƞDI4D11*uDΑ!2Aybe,שґ[˯s5q 4rXTv%9x?n,.!fت1mqT?S,a0c FGT"+ǮH̀اؕ3fs[9X˨-ZϼCS6cYαؿYnmlVdn*!?<ϳZ'}d -ZIJYuW݂;0Qx٫kvj袼{W; TSՌ&\Ph37wZp|2 !*Do%?M1`UXk{T9\q{r#wVm'袽,;^M;uv{g4sW?e.)l.yvOZoj8;H)4]^E߳" ʄZ0aXk8)|v}}V1htيZ"eQ=$Q{TlT/z0QF|SAn<舶n,h6nFoFNݑZ| ݎ=Jdּ纲@&l؟> !fh%D<ܥ3H="*TY/%{ % ˉ= @ӂ_o͚cW) )Gt:1*MB9XgmEO`bq1&G%:c2Z̜,戥5 nq3<ۛ. *vRd;s^[w.?M^vv)hb F`-gG;cDڙϬs @$VR''k=_@p haA3GH]wyBKk'%w-%>TI3PNI]Ce9d{Ϛ"E>?ܧOC"\<&Fvو]y3.V対mٌ oLSe=ߙ0?^ݳ*f0Y pHxs(H,ɭ 2Uy2W}{*GԈuap _ CxpwǃqӗO1fAQ>cܵZ&>#sfu±]BNej?/$ 3l_ӾhQԚ ]* >`7˾K}Xv˚ ֆe-1de^?Օw?Va8\8L₳>p 4#&(Ef15ڨ")#V> ̜Ʀ#o.ô,PAc$LLiV=.Mwi &.C4޼˨Uz]?l X嬾qv`'1Cx?A<,v>ų~\m~,5Gi;斤Q1-o'?0 Rc76l~RlcBRwςo4|$[.T ݔ16qJ%3%1Lf!#=Eg7PÍ\ƈrR-UpGK [xLF}-v]خE|ZDLm^/&4܋ Lc 3Fi/ٌzFBKL@#c XZ=wSFEcH8MmhG fIAhx1i3֒k>]"1z1ObyL 0 c H[va~EEcᘲy y~ (-\7:=.%HS-ad˅kԀ{FLMC9͢v$3\:Ž kWh,sm'0{ . 
C-!s,NDTb Z8CEIį)t?9(>^Nj.b~n2(#Il), A2A(* R,pFA:b +蠾w](3 &aAƂ6+b(G< 6[Z,)WQb2P1׎ eЈy(Y|XSL#EF}N =$Q`Pa3LB4hf$"8 Bu , "i)s0}+ P+=N``֍a50Er~^/brsKoJ -p6 [̈ ڀZ>RT2k :'!'äZqwfO.,'}b;=d-# lzhCL^t69K[oP`lNYiPxgTrg^F<=kx"[Ъ[k1/'qc6[ `Zw̾^}iWڀ(r+`Ǒ 3abt@HOdi/MӷfXT5@s-cƧJoZ)4D"-M A[m 8:*<_\;y,tckf~|~fׯ>N2hQL*=E|L|^S<V'>)sPx!%;;vI_)Z "&lu.bsŬoapXuGP:DuDK)F!0`I6" Vr_}>0kѱ  =y.OoO"\oqzn4vuPUAK}ۻL)Jy}acԔ&yq3=!Aov>ų~\m~,6߹ZK&kn '){?BH>ed1xGfVKƋ7Ƹ-&kxY_}J;m5ztwOE[D%8-uaiO_^JdGs}5Wr׷|X UGӏ >O:HQ59(n&^ն!Npjy< ɈK~lXtq.q I؂D!$f܅@PtuҝЪc=pih~LAp NFM?\F&a$ tlN7$TL6$ɭJECLbUzER5*JS4MeKjBB0{Hn_=e2n{d{GId߯(veݒ10,X,qS0y2{G88$Ec;f>˫# -Of7 }k)Zi<$v.<1e6U-plԎgs R-)|#q.PPf-y AWZL36 ]+Rt%sj麻QYL?ri|ipZ U+SpɅEȍa?(}!kL smTԂWUz%\( Eb3x_3(`gp!v8^ 돓"6mU0 <)bІk#]e4aF[#p8g8XX˂S #T)"̮=jOUE*JI;0=q)}B** ߵYym]ewT7»B(A`c7X-tp j(Wh2_iH/=^duB 1pHr41Ҫ4:_o@#,:"&Th$h[&#,5Z,i|3a,'U:Dr´Z f*SJ` R=%)%3#$#O& o6w:\qy<;M3e\\ vtyeO)턼ϳ_ y1'1\IݏqOqqTmpml\fn,^ǣۊZRHؐ=[\ŀ٢$$:,Đ%.׋wLX \b㬀@~ hf:s0ksQh=tBaCgE<%*yöDg C7EJLF\p[yNǝ*`DXb*xOD"R("'LQFH~selAk Vj!\+EV @Qk)!UxS[S }HtK&ŸB\§SjClXNIР"T.TymM`nm*yNP+!Ő|(ʝxE,2+aP1jKfRS0:|z-2 @[JCpVm n:87GH|_D tN m T-N%?Sd$fv?N ?9Ex"LA =9Ek2$xJ gD%@*;fq!H2ƣJc]%[%&DahBz Nl7^0(嚉S4h2)v&uu}My#)9yޠF(Nu2Nƹ@Nu>8vFpf9O|@oA$=MZjM8e޵Q k}:&+r 7H:.׹ *D U%q;6Ԏ2N Qsv@R^Xh^Xq/ CSL]ȕTD ! T AD{aRSZ~xu"^=aᝒ2E5[MPL #1( 6r ~a oqU4A$n`U$@50"P^sh%k] ,jC.c\XAC/( m0D#`JppnVi$džJ?aB ZpLa&.8IMeiލo]l0Ynh,̒x>uѯ9݇rycӏ_<J:ippJ0/erEtLSE8k>f`]NzTa!{ؙr3Gxs;Ǭ F7C~J`ce\ tZ@'\Ѭx??Nokl`?ܟapzgX'by?qp^n'9Ο阯0sb q>8K7qjXG8,od`۲p+V;8V?ۻe^VnxL@s|[FJ4қ[.|gi{&nYlQZcA\C-:_fpsfqQ`WmP+ = G R`9WW$mЁ/y{Cp:_pm7x ),!萡Ukw!^Aw^$$I Bn.LBpњHb4E5ZJ$ )%k}M0kqD?uɑ8!6'Q2^p"uy3f5 5:]OU aTggF F'Ő!8/sQn1LR'06b`@6[H TД;¬l?~VNCK ն4?wݟˎ?`nQϬ1ª kr4#nOYypԟH\ufb 8~8˔ޏq߿O?{90r~b1lU 7h^h .pGl 2Ia~7r{μ|X51n09aa>F$bWm*؈feOOn~jpxO2#]z?g<8Cq:8? 5hbFz0w'. t$lAU <ˇ6 zXDRhSML:W7V;Ji {^?rD6gP)q`bcYy*a8TG2i@P凡?OuBJ6ynn̞' $uPxj >^\$w14jjoQ5__bֆ3M+A'%6f BEVDa8\Et9LfkGﯣ-rw;8> Qä|G_bl6yVc?yECCZO6ӥceui)BrWY{Qa&17[8ՔZ"3u0 Od-vňqΒ&u(Z s5@ a,HFQO`$̑"2PtPQ(A*` &^! |eDp|6~^w~79M;sL t?68yَg;{QtdFiҔbJܢk! 
e3Hr(=6TiUmlf&FByf Hoqr:'sw:"mc$4`0DyLQ:*fN)㑳HWmHWm|G>HI&nMy'xFIe2IUn)Du*%2HVJH|gƲ̂R,=,eI, ťzEX`ESRQFj= c$`j(qB֊"L$DӋבXEXH!5:ja#'>g%)H&i0>}qIG5^]{7*#VNq!jH-Y}%ڊ&#&cR~xp6$`Rв5k#R)t/( J #% :Gt(cek2dBVkI7; @`yJCm_j:,|CH"KC@ DqOiNĮ%$ktL1D\ 8iEiK;a GnfiS=G!l\bWJxI%5:gÚb:Ii>Eh*,)ʳ' CB=@B!W-}8ٮdyb8_qoꄾJ$jl?B"{- S2N !NUcBS)e\AB|@q摩l@x!^{ O+^̃3.z7T ?կ "܃1'L+rC{BD !.'ȑZq]wwdGnDKDAbp*[b-k]""5`8ƨc BJ~^a£!sx8]啤]{ѠB8(!x-w[j Q:*8Y)b].E%]7Y,VI(S_aFۛ- 7śE{ 2$3G>}p2as( ~s$pe5-Hix-,*QKfj ; r.LSB睘ȹ$] &XT_& ~*I~TEUR0TpDl GZ>pTuwbUҀnP;+4w'O7Cjc]R**F/@܊vw4!bGRPR.VY?,gM> MJ/.nUsH`*Q#N>Qy62v/J~-ŝY|:V2{2bܖtdJO{A%pፁ;_׌֛ /gGk*` _}Z`zXM5% r306hE/ }O_Oa$5k4<.LS~}`̻MK]F!g17'֬5ks v0>֬?_k`uaNo>ӭ^NM!^{ÇdYC^C,I38mi^ VEz1ܲ}X9ʜLꎷz7q3ڶ y~u⶛9e:c݅w;!E AF0DStUm(kFvSc uXb5@…HFf=D3 \hMAAAqq 0kkVx[[ܑj3ͫM,WRDmRDօGHkCDIh vazV%-C5Lfc-2; I =KcԀ)b<.{Ce]P: "r0n#RpoTB(u0p"j#PD+"Gj Wvec40p@ qF5z ~Jp!r@m2 ڰo(^;,Q?GV W#9F|XN,?-0`a~ NR|`4|̦$Şm{VRD&V~d ?2MׇcE=S_i0]nd )YM||7I'Snޙ+l*Zglfe5C~gz_ie7haX 1V"Kd6bKږ'[GG1cM򫯊U"s4ɋyIF[ծ&~+i'W?d%/Gο䕄4jǀcp{""X(kBl6YyυYyVvrW@tO`SIQ!HYlEt-ՖXX a13sf ^zrӂp쇝<$<Բ %9yM8ı=y8eCr Hrw +YCp艔),%(8vuz#G;]̅1v~D_ cקi'KtP~1]_ `AvUg_0(; g_5Rj0^KkI:ZچS HhB+"ɆVD 'ZN6"lhE:ЊbtrƲ}xy.P-hf ?ieS.gfV0xk&e5B$%+g>* 4Nylΐ>@ay:#duFꬶ\E.f젥]a?h# ]&㻺heW4Z*ǎBe$7K tZS vKcoZO 8|x2{~"U2 /^(o=98%pzw D<ͧF`ԧBә-npXm`vNv EPlyg#,N-^lqZsf<9Dmpt8_kG_)0R6nl4Ů⑝¡U;jDs-J Q:Ljʎä"CX;xgcqoxḷi2 7p!۱ܲ6g*܊mvS.(]kfaW ,wG/גt}t8܊ >kD7ZG|vKJ2]9ՁcLk$kM|3.:.P;7v42ω Ǣ" b4Q(U*wIfE Bk H#Yr#>UIC6"Xʱ7PP % 6Pr.wřlj՗W] d>r]w}3og<>+۫|Á*c۲!qKH3]73lkM-{ a^SF7lkgn{-$S[b\=}Ez\kl-gl8oY1L +ܚyoUnZ7˰+FE(j= Θg?b>#7rAutЮv]wЖ l{ȆjI[ Fz '4VRtG/t:\\=Y06t) C r tZ*ڬUM6tɍoi!u-H8׮Zu1Jˏ]J#L'a]Ӹƞ(.כ.&d,m'ƓW) #YƢq@ngK<@0;~zZ${^gW=|eMaȉS4S[7 nQXT P'MVSx<\{-܈֭ 8E8UKJҚuTni4NZYCFjGz2Lcΐh Xm8Y7B8떊Aꤶu;\!TYZ6rMvmuƥ>떊Aꤶu;\d*[n [ 9q&q`[71XT P'MLolh`ȉS4SVpeͺQBú%bЈ:mldVehݒOhmjZ`ȉS4S[n=uKŠ uRĺ1TнYÀZ6rMT*g׭BhR1hB66n2n}7nm0))ﶿ 5qdR1h41&?1G-@[ 9q&q ]Tu}]{%ڎrbk[ ue[M kT_[V VkC];I+D(k5AؽkA9}}O_Ӫ&ON.+ZUMPu55=^=f)䘁S\t.Ǭ19V5ApݹŠ>[I41+Bhcs̭j!s@b}1 ]1+p"Ucs̭jLC1}1 ֽ319V5ܴ1965aɻcfjgB913],1FO9>Ǽ ^3csjӼ{9fjscnAƨ{9f=k}$rRIҽ3t9>ܦ&(Tr`4Dcs̭jC5A1965Ac֌9>ܮ&m-cւg9v5APѽ3t19V5Am?9f1965A#Xrqsc&gZ_\OF[?tBD:y3&ӻ@l SQE!s 3LI9&X iS\|4,ΗMCfMpl{9cy0/&LgQ&g{n_0|^%k%.^Nw΢'JE2Bᷦ6fj3TK ,VK^)| a-xƊS^4QQB(E6.d^KJo|3 %:fTĐ (+]5|z9a>V6擐2dU0Gx1kT{z7Rr2d&UD"3yC3"8R9~"eg3 >DL,#T0=)pqڠ@$XN2Z)Vr;\ 94dٚ kN%DcS" TRa'sFE2pWHB $/0BP)1ŵ-9O J y|/p "'VV곥LqÅ]])KZ?vl4V#aE\fTA3eP@s\#޳DaL`b(_0Kp` k<BEj[q:qqN9مO%Gu+z~w=2_u/^f7|a|nlzJ$޿"W>_x̔r@9y=O˾ \oRxOESʇ AL};/-j(d^Sv:"S9h|q-~RϝLm pU{CE Pl"؀ǔ'9ΐHhf-l^̴Y-/Fƅ }<(4} E =6^n=cP] ׁh_&aO0ŗwHhZ*Q†`ToqHԯf & BUμ<*|5ffoRfȷXq}E03l2/P&T" #Ţ+1GWU=>{[p ~o/np>p0]s7> 76 UhaWןw q& Ԃ ^-Xe['P߫߼Gy!:)yYFɹڙ[Y4xGtfƳޕr70 Cxl e@չ4RpVح&vS@fSv.ʡZ[~f5>Qk&돴ы~M Q|ղV/ `!&e#p^,V/ ?-CIc,Iv$~B="9kR;QUѫY/Ƶ\!f)fs׻Nz}JP2-ֿ޾|Өn8j[T^HQ|TZUbYbh 6qOTE6nYU/PV]݉c=0?@NA|P Q2\וa;NrLq*hnl%__QO7W$G{fQ0M9[ n5.&Emk;+$]@1 NjFJeͷ8vy3 vHoe:|.[-:[u/5QRnAѯ ¬u( @84~ZF0.*+R{CVzK1,!mAu[ENcs$Vm SAtnEi̾=MV3, ;un~*׋dJ# A5cYu0ƸWe_YN}ӝP0 >ͥW.omA ~ΣtY5󹙿\9wfAɢ\/uc(}rw); 0sZ~iQyd&,W˴ky훀]{|]˕y,uojIS'Ŀ7X|lL<z_ no6$9^sH`xx,7sw(:eNES1 69lΰ[RmvWir2'V74e(!WD,oQh, Q+#lҵޡD"MZsiiK*YY2ce(YY|W֔bU](&{=>=L]]A KK';)b8gם}LKB!Үu]PSg`G@ ǻ͜)6G䯁SЀ*4' Pk].R|쮾WlmpI59lٯeͦTurֲ=NZV.3F6 jROKJkgDSR)O)̢&jֿ ŅVZ쮄 G!c?Tx;;W <Ӛd&+a&ofkRigvnOd1Z}O0qМxL|bDvkL17' HdJp])*%LW")Y:Y Bs,;'P✑ q>IOߩ'Y1Fj}ƴ$Py>HݡcIr\Ԫ͔Nv_Fƃ,Vɜ;{+e+iIF7m|7R [LA0J8>u1}NMsF#M&D$s;XLjhSEkv8Q%8ўII+zkOP`ܩU@kۢJwiY;LJTW3A2 w1[Fu.s~ר]~kԯ5hq9+QARKT\Hc1)c4.F^u~IԼ؞+Uht;iQ|ty3)+D?\V0 迌'cvШMe:x.~C'zwW45U~9mS ug:O\#,~@&%>ܧ(_6vmY ȲS1`K}51+)eyW0}uS/)ьF oӿM֔~H7r4gPIXuG ^(69q@ 2\vɺS͘u_d J_5KuP Q?3+6s-:K9>LjA5(g`Ԭ{-u: HpKxVP_bnfGӺ#ԥf ؙh&k 
A}P,S_Oh& YEf${h G4.HM$q ԼӤmIޤonrFx1 {mDžֹ֣}1#3(AW;+~ vp{x0og2ff)y}8\>t`8|woaPlùEͭc(흜ɹ}r:G_1ɋi*LoqN'C&:8苐yw ~pNbF[(\yb fvN'pzp ZfUrEQ'g%s4)@p <',86Sךh ofm˖Byd AH+~|W?1Y9Y[)7~"2D$+;I7 ޡV{vq]^lD.VA<=}kg5Bt\qG$಍.5(TN,:PA:ePQUE_RKR0k( '5 Hs#PR.1"ASGTQ pq #N_]jt2 *,Qw0VK=^hRf'$ZIQr\ʸ zCciDuMZ7dk6݂ԫΏsd$БT#Gz\C&:c5Qk,Z,gge77 Y+n\lfisgHR ֯8Q)!@4xKcGr>([2|HZPu(&nD\NT FJ Y/062q\$aD؈g2<74@*X4S+1JÏ4'ſJ{]唴 ew5A"󃨤BGwШ|E0FEG7!F 5!PYipO)|־C԰Ouw[9*8\uF铕GQ"AX2ͷng~&;]ÛI#rڅbI=ٹ"%JE'd$laIm,Y1E=*SCS3Q™%c&d"nfwLS@i4~nw5../|2E`T&UG$x-7ڼOvTX_ǑI^fTf~Yw`cyAɣ:*Uo0UJISynKd`0N REap8В{XbvK4fǣ>MiYL^9CQfq  ev_ݰp-Θo)yHs:7ЧR63/uQ$!{KO!J)(HUЄFB_A@5([g̷e"rĩM9lmAv ,2F%gB-%T)/Uz;}RZ8;wNoe^!vZ.ӞZeQQ泿 ;R% lɬSE~TiAUQJrG=8O=m#ᬅsk'aBPr'Y?`.\<6q$G,zq<9YL_,!@4Cg*LqAU+,x&/KN]|\"e=B* 8;wO=M܁O=(, A G*3iO#O+?ц43,oxvbU*)~{oˇfBH{_ #Q?2F?Ap\\/!`ti-n"&biO/&HyC,N#=R&׈jrD`I"].S5Ȼ֥ixKG Pډ\6L{6lNPL#P LǴ(` ^8@2COIr#IDiaےzjb2 ?3@:?M-haZt4L,fݾaEY_XBOu xtf윃v52Iațh}I0q4އ]{ >lq>lտ3B^Ux]m B Rms& 4J2ɶ“"S{ĜԼ7ݒ2C2l$8;i8pyJ2bNߘOg*FVİoƇEQG/QYH: e,qF@jj$Ũx?N €jg 8@ >)K{8z1 X ;P`[$߆$*)i~a P톱~ĶWƙp2JKf'?}[T"_r1,A[Mc~B\{2Rڑ!iy')E-mULvΉP{2zy_oۧDH`?dIf:d" &\6Ez}MH]i{%hC*zMAzA,Q@_Y^`gK%X4*_O!iӫⲸ}u~n %H1*#) Go1EC}&m< Z0K\O#P\յh%E}Dumg._y}v.;<׬9p֩U쥫x૏B'i/u@Svfd63.շ;@BM5ϪRh:೭8GDJx᧛!Pѽd=ݬLvYX?}\9ͅ cNgt2YܶC+=~ ,#N0Β9ǫ g Aޡ|OOyP@t]t| o !2,V![}%oqa9泿l (|Cz`g6[ϗ=>ىHA3#rӤ%q j$IόGs/qGF'IS?3v^v]NTFb^&f1b+*=36C3us֟i).WxK܈Hd=uȍOF)3s"6 ]51iaRno.Sdᰥ7!}]^G  a96(kT1vjP\IJÂc:sϠH ʉvE7ߎw6.\(.~ Wօs513eF"KDc8]T̥JqL\mg=ե;]r-\~ ?.UY[O6RzJOB BU"ػpJNqoWӏa9wwokRцB.YR4HBH 0e3TtqvWhWk*\xO Y_?zv[&h_- 2]|_|tẈ-&Z@ed@|!48Z= Z: ,{XouT^R4 AxyK/y\oN>{б4L[Ītb'r8z>M9&]p20#39}5ELMp\5iŞ+fY c{xٻF$W RyD^ Swֆ {<~مW2IMgEJ*%fRGp$*̈̈/t,\1MUa<*`exn"',A*kXLJ;\x}@ZEW)d&v.yđJ p֟-xg02S*9QT8 [TSG xmx"$GQZ}R脱<(l @ա;R/ }5AyHoqe2/D04 ]5$x2\R8 yX*l5A86r0/L6@.'S7{M/ԝ^ iջ7^ȱF0'sq&ńBCA2GF;qpJr\YkWЬ|se1њxm Z b@ 33Zry5XgLFdkd$iN&!aE/n  .Z0rlw t_PG8 j7Un:+C\QehJ/(vl\]!!Ib=Mo/mt18h-]+|{.'EUnLF,||ͫysL0eZӸeT3Xak } ]e M7+%JVtWy,uLlz?p =bMyJhky~ S5NF)M`LS[F +}83rP콩}L#I4c~u%j-Иuje_e@j)nz,zj9:Dwv>$ TnWuWdnrkSs=h o[ۥF:T ܤ9&=3V:iI̙V:$"8-k31ӹp5*&R3 `sV˱,]F6**_$[ m@/"PU,EXma"‚#޻@E,L$$Z(<`<VI4C(Sm8TZGtϬ&H"+XܘFdBRry1r"^?|[ .ƫ 0ƿͦWj$1;Jp(5kPhE9;?KtbEAJgBoWH!./[3CZr{ JYvo7V:u>'r̜KGWww[^+cQ;\ǤQk=n8gqjWᴋWMW&yR6)ـ@׺\T$ EnbUݞ;%}7}iMr㫘֯ntzӖJoOaI*dQn\!8-%⬧9m:\&M$#*VmWmtj?[ ٬%U{T͉-3b 3]e9x50\j-৤_}qb 3鱊N13Q1yG3k*~2 do&OGxjOEtx} LD<#E_gzoWm*ރAT#$N=ߍ^]A5hFy-mxϧưI@: rYNBsb /sQhltZFゕBy97u $"g(0FU*"o% %)MGWjsI(/oҹ<\pf?c>vI nAf?w}&DŽEG'g?>lX͗]*1IGn %DR#-RljF F+G%v?NQ Q6*LV]󟹄v# (Q8|+i4/,' VFJZ:&W@K}N%JWHMW>~9v8I?n"]]k~6)P ]FqD*<9”whd!/g"eB1Iu,F%i~)ay6-+f=Ԩ>|cv?K-Gb6tY[-.c_4~ȤD$vG?~b϶͉r.hečŕaHDUF.蚥Z(I AIV;&j" ÷mΥ}-5꜓_ -:kТAv?F'J3(b^g87A9Jc<k`ұN:WnrjaC^oQ|eE\jJ5,ES`uݛxs|^&Ё.]JS}os]T߰+jbAjB&'x 2Dn3km{l2tǽ~=>O͑MiyӸyaG_f8fzCge:_̾7l:p~|)O,u6QJJj4qQk hN!TMr HBI@yܼͩ|@YD]}0qOro3l'V\oևk|uGhI^۱q &J{×hvuQ4ۦF0C;a~i~mx58g8X8jb$mFox! 
-f8maLitm_*_?1oԮ 70a#:!VY7f  G {GS%()1ˣE/;sS^2?%Б 5 B+k݃SDg\i"TWd 0^= ?cOOͮ=m3jk8eԜZi`U@(nέpLsSs"D̈́NkWxlDduQ5jʔ| -³(kQQk*)Fݺ+F\D Fp5ThȚH䀕O' ]uܩ"|Z Ab7@-֨dwCq5k `L$ ß??{by7/Lӧh}.eWs冹dwvq_vv~5uW'Wޜ\Btx!u2 /L`zMj"@JJ5)|f+uFs3\̠jouNZ0`(BT:BC#݈@ nL,\ɘN"d0ʢz`wqc v4 7[Vn*;mKuv&\ l;Ҡ|B %ّFR%*m?O \2fzwC%3 YsΣ>71Ӻʭf1\ڵo_7_ֽc9V=YwQɫJ8htqw 񍕍Ru^.}O#;O\Ga[%p,?\rL&bB@x 3 ¯UOo)43ӎM\~6ݚ-yn93Ɂ߈1w ؙ.PNW+Լ~JUV0"xY gʚ_ae;SlH(B/;|_<@hѭk(v")ȢbI}-ėHoViY<û۬j-vY6ެ~z\l&ͪfsRïV%`¾"P 9߅j%ԈWi׭U«s .j;pVrpb{ Hέ`ScVzaLǚ9۪i?@lx_SmPȫΗVJ,2 7R~0NǷ׃YntҌcx|v2skShlɳ@xgwpQYͮ^0?u s-evmBgL)O:Mp_S]khK>RvS00ՀU00m;ǟ鏹^STzu¤fFOh'{+Mn[W rD3h=zwF Fvݺ7?n}H'.dWqj7]%aڭ+9m>ؙvީ+S!!n,SRxfӴZu]vWԆ\n9^= IRaϼs9#)䙽J8rΨ!{sҬ2UqXtam8x?/ s/Sh6*{VH|螨O%5mKjM7cɎZaVI,U'KꛯZ%/׎)}.h` ƞĖ7| lլ>̼Tlk)ha rF>m6z>ox֏@ ~"^7Kuq|HCj:MM/tyuZn-SYQDɛDW2f]pc\/Ә*+mOߣ즯t!aۈ5jA9+{D+4ll>J[%t .Fi.T?Eϟd% #^B0RXYBSNsJ5x^]U;_.CCx9.,\(yJtKKaDVBb3yXr~S] zopuk}d`1nFsS?$OkM.N]޺x2, PcS0Y'=`7;9g1qP0!3&}>b˞M[4auc}f,}bi~a#E^)Te%S*F~sW>\N!;:\¿ZWkGu!XWo~1mچṾ;Q\Wp]Au0߁|5{RʀK y~SC 2ҲSXRz(k17O@L猽Le[4!϶iİ[Űc"u|][^Зژ b`[}4*YQ 1ͷ)$h.~gZL1_z(%ْ|JmcV~1EUS10wrݑ&~媚Rb#cṶ0᮹WQEπeD\Sn|TU4X|uRf7ˤ.0}oztq.+%zOT%J߅?ysu__yʜp,kxhB-hc`Pt3S 3A7 ~ܴWʼn7xd'ZBL tIJ(#WKDQyƜD5ڌjBZUN WǼ,FG70u1/RQ $VIWmi39# 10R ʘ3]F%zS53} $ٓ*/&lt!v#8zu;zOAaCnA%5ʼnл:}sb9RJFm\vk)^IM~|`Ҳ ExG۔Ź`[ hJCi$AG1_NLG0 W06pX!h(çVZP)hM9: z1k^ v}!%Rk7lP_<$.uډ_dy_ *.K_\x(Ȃ*}>E/}|苺=G(KL`LG"9JYPG9 j#f])t4J\T3_jŰI`2"ur5X+F+o'~bۇ 17':&%wډMk8PyRAig \=118!|ƱhWVʓ? X8TJ`'}ҁ'e{ʑ:ktexbIF]JQ#2n;p^he]0C0{)p~fNmvLh]Dz/A@EYj|(qjJp`s--HCP$;i9Lc1P S=a+v8`D0<ҀsC 3 @H{ $x2YD mA7\ B ^S"JB[ ZbArA-ZvZk M ܜH4A.f'8/p|? H8벲gBw_}=U_?go>p_ 9 _zu@3e M<Θmg'.-?BN?\ewngx}psO6ц T:+!3`4@ALc诳|aP0T15'2G Q4g(BDDŽv9SK]j&J+ӄI@KA1N!B{-)Wh0 F']RJiE!D |6L'MWItbZ%*JZQL Q3Fu;$(Li&̔9Dw u>s$t()簰U2r͓9}Q,2|qtQqVa1@G?;Nд } bJE0"IWqS8uL҈[H?KvT׆`ZQQhd,H!aV^p&9Qtt^hK4=JX(nhj]]Yu9(jl>fHCΕfII5Aj;ʌ45JkxYgj0A\*!yҕ4¨5KiV6w&% ݦ1HlsEfNDM͒i&Ǹt!rP=}T2%1XV(f:Cƀa(r?%qk҆fƸӞ*ںKC1=Vxoo _/$SκiIi8X|5BӮuE\ç2q>L&g(ՓO'7!m? jr`O?Q-&Hu.jrq8 j/%)%M5i̴%y9;0!{5?c eQX鲰ɢt:tkqY[)Ki߆+Č3'`/2n1IAO?FzI>@t5@h U@t=J)Mw\ qTh˭V;C3s5B$ׅK~$R ' oL, X6Dc)9gQY 9LC^G$i_FXD3Ut*Hiu$QG/:J3 u-J aN V{ %˓`HtCg't\3a B%]Q#yД:ʨTh#NeJI #;H3eէ81 nC|0<8JA\d/"VffUH 9CgZ2pnjZt~"D4~oc/% cz 3A8IFX-ֶM3~7Pd4krJ"QK5PTbCj#憤TkNRlTBo#I%QTRڕLI%qP@ PBpyu \'(׊эM[8ly0 Yߓ~!z'otǷ١w#X3k^?o~e \^G.t_a,i[aLU*m4Lﯵ;eU6UNY[2@ehj5S>ۨ!َy1npF`PsAɟSJ2Fhx5%2zd"EыKJ-1H"ԧP(0Y`Df5W8il`V)U2}^:%Й!6 g"no} Ђ` !xfJ.KE(DUe*(+*DB הU&ɷ_oQL͐SdwY\'zdFz:ͧazF+ bjQ~7-Z& 5  gb3^j+,^m6j;Jzvyߎو7r N$dIJ__ͤ M'HAr=SJ*#39Ŕ> a^@mY*<>\1KIjm9wcBj51 zqx7?*0/nO߼tTS=cRNExD8CfxUjCܨXw8 JiS6 t~Rz%."̸Al |YrNj-(3LQBD&d1z]JQhKIε_*h8 ޚ7W.Z$TB~႐ Rf1O[^0L"n FcˌZ9hPHFS [x IWbqQۦUT@6 tDVCk$oOcfrD 3m A~4bF|@HQzb(0p(A)LpҌ89$,G%TiV?{ɍMP;y % .$O"ԭud-C.]jլbєװךV!,]јG\L_G,~-pYJ{"Y)r~k|t[V#r,ݟ%Dc~{#Ʌ 4?dD=za9-b>} l^ #k:eˣ4U1!ȯCNWArߗOLQ6.ܞP- b?Ek'tՏ0s>5K~oz0 Vs#%M|zV=}5z$Yt8O^tD} lu`/~_r[Nlb au6juxG]\ίW?jumy}@le2i-|w%e,1#_%eTb)jS]`bY-3> 9.-!ՠp2}MVSR}!,\$=`y"` %-ᦁ+ Y:Ej'jxpv-f:aM X]7ͪoe*fjqwЇ֢KG&L=o^<@vp{2g>@bc?+f'lxIV⍐aXCWDuz|:2pKI}+ mYH;ηΐÒ:B(]$s²(&pЄaQ cJHD!f`Ab::VJw;)F-y0ZYbj4s@l"|z;C3F?tiH'V ̄p|Y{9ʙ6ŭz-2puT >4 -Ģ<-By=H"ZF+s㣬%Ti>u2"mQ@1pLA0F=+DW#]gw0W|!YgQˠ1f& Vati$(2oRA љۭepy6~u. Պ[-j& !/N `Y;䠢8xZITJZB0w.5M[͆#ñм2U_p6\6E;5ֽOs1z's6M.ޚz:Fzdtvk] ߥB A9b`uFK@'fD/.7-O*θgFV)6 i X,3e'? yqa+2@?WprlV@UMv ` hO ;l`5EGW`?xtxY\b$gph",N\>ԁ7ezU #DmSZ9Ygzf܄VC] @0): lePUvfs: jN.y_|n%rxkEچ&[Ǘ+ፊ-~uMe` X+rev3R_d""k+8Nuyu4BTJ`^͗ 4+G(*VU$"ms+KI(Sy+ݫ̏=T"%aD#o85RQFZߐpr[x2S0JPMCl }B|~ tho nĄ @ +y WΣ-kz} {A#t g791I! 
M"n:ny2 KlnNl}.氜תoܴݴO*I,'['dVN䲫79a4N݉Je\SpAJ_޼χB%HӢ;eo9ZԌԫOp;w>J)O KqIP4U1i9%ڃ iS(1gR=Pi_~ޣX0ܘk% R diDSq>_[xx5'`#8Uz@ߟafRpu\ЄQ4q`Ж5+k;𦬿 \Q\Gp3zH6Ц1qJY}\ !Jߔ-w J0(-f'@[q33}C>M0FU(p?3ܝe3{pa/5!HP% :N J(lg4%FZɂ(nA EgHĦdԼ6 'j *?Ua+&V6t|W-Ц=p֖K_ OECaR ͌у>*^dA|>@J`B75X'UV*_.sn_)a{ ]P{4nHyS SkֽRĮ+x=S"R8q{ۣ8$sEHV#2{(e{$Lj&X7Ym2?3CR,oNz,wI-Lp_돓&3oo(z6$ЈkpȐEB!eg{>ݔЄw!]N|}-v_#f-lL1 Io P(aH@=Y/ 3 z}%⻪ASڃ2-_|5kl` [mϯn2\KE7<tRx6eMW*$k)`!ztFJR¿-kd XD0)l) n/քMPsP/?vR ytD6\:jd;QZ~Oz1ᵼ?T"US;bztuU*ɡHdZ{1鮈Բt6VyUh>VK@ǷjPT~IN#Yo9.F%=(do۳8ȵ\V,R\y[._2i{Xx(l#ΌEZ-LHǹi [xow72aDžK {&g!\Ĕnfv$ ʰE5x,ex\BlCG^o{}C]3ߗ TPˣ 'HQ'=Ci]yoƶ*-dEx}xnFn fȒ-Ɏ3dS^%QF `3g:?y/4x/;+@KBVQƨmvF>U*4㸖 >QZqV7(2MK3]mDEb V]A$]^\G\ᨔ~V!0kYV`^%[KIXfo O7(:h;?,*ňCiWmC̘IIG4t!gIDn.E~_׉q…cR8mѲ'JvBcTϐpyքO[>y*Fn? 0]li_C4'fzBO;ߪ4ScB)/]hʪ+d3Rd@ @UrJΜJ$IpR{-)dT/恐83 Ka!Tvp籕|i#]4b~;EsWFY$e|̉ y>gpZKku#1MqeHvB"ae-`u:"ucrTPpuܝ_+\d.F+ 6k٣ JݴaIHPxk[̵OmpZ{Yv? b%\b65o󳆑x\[7,m[^/UX¨ʅ\Q2\\~<(O*XpKuX 1-Чv%O[Y Jf N d-Se7|a(V.A% HOLbrUpӻȣ~}|8T!A8ap2gCo-vQ|i:*^ K%Բfqĥ/pWvY]r${a2mYoZi@icjSy$ƫDr" P!BSYBjMj3Afm91zǛR!5+k Z~.43;[N 'Cr$XKrn9uUgR0s˟:=eI $,-$зPyX@#Vx0G;w0W.dGnX;Lgg\=)Nܬ y:;st$GY{v!W< #R.x9?"Av">[:;FVF}9;l"aSt|(DdԲ3&TsltU\ s&ϕ:wB22N!R1R!1]N#Q!E$ЫW%>3ɕ\ԟaڑ D,p/͘6SܢLr#z.SP =Y\ p_|g@v C/I& 0_$xt1E(#?8摅a,^*SSFPy®sfUp8\;#L( oiO=dO-(ba -1i,Ug*?KUY*`: ոMy<2f).g)oWxo&$D9 )eVi "zL_򓄓gg3m)9F#wtbJ'p^ Fԃ1 c$r E99ȫp"Ԑ_UB r+WrPV|H)j3)2\MvuKT3gRME@ۻ5*-f$A ^3*$TZ^*cDLI(ZM%9 +]˦(:KP8+Fbe}X$F` ѩDմ#xR ƹK&Ѣi̾g.(e\[V`%-^ 6Fٙt_UH",?)!}&xĭ|X-6>g[͘fe 6$eP2*pyj71vōU UB:+l#G9(QN)$Ee N  $oi"=Z1H$ !U^ XX鵣*j8xEskC8 v0rArM4UPr s`< 0.C!# ԪMpD+΍G9#SkGUB*RF;#YrFə4΁Ѥ.j\Y;LBGQ() WЛZ@%8Joud`)5w*JQKsz.3ACL#&%0e'KuL'/ta- ǵk*,4~CxRĜ'Ϫ? ?04Au"G \$ ll:4;OWkT4wO靧SSO`75>ACIHeB4^M0͘Fq24L 4]P:,o >oz'aW H N!£{'k^|*ޡ6:;i<[8ف6g%Soh?ԝ; ΰ>\rB(?on]hj@jUkPL g'._#c[֠j|t}rsu󏍷o^vzSa>?쁭هuF78 n;hPdOi|h v3a04iozݓrEW8_n0G;C(aHhZ>u}orgCǍu?o`߅8}yԯwm(H7CҰk_&789*Iu1~B)_n#9`dT)7zYB~GiV4w]ifqgݵ.:EfLJ{ DhbE>BQZ`h!J)\n(7W9kDϵ͟k㾀ER9ksmfT_tBW{k/o x;̗"`k)oK"Tꙭp]BqQڹ G~jb,7Y4ׄIe ?bj`. 8(!ιYJmzpc, 85NUP%*ngKwn`N[D$ wR+~,b"Hfbd9SH;f :Yֆq [4 aqe{]l*0\1p%8]wV\Q*H+D$|"qlL&/&,R^|pŽ ї (_Ea_K(k6 BϤRy룢_쥅/h›ݝףVowZio9Kc!{2: H1I! 
ݶkR=S&3l hAփ/5^{ڻ_؜]:jc`q-SYE%*:g݉s,Yʁg)P5%*ֈk107V2K"]Om$ pQ쒰뒴p-В+1Bs'v@SiGYǥkW+F)ô -¢I1ܧ MiMǸ$ir*a/*P {L*@ TV;rtd\c(U~6P'iň~FBp 'x#e/ K Bb}Yj!B%OKEh ⢎[l$'X(a or}(--c* O2/c%A1ҠC@E^U6 "GTQxH89`FTm3juϾU cE^lO?,(r*e&꣎&SEX볟/C'jF_ֹrnjhVzu-x&lh;DmۧZzs!A9fZ` hirHMClKC1&6\tVsYEg\^ fo0nzgPƙ܀_"׫}m&MEbH;Oue u2:q]- Vw Pr҂ti D0;b93Y1̅1vO т3;ы=XK Xs}$Dbw[3ΓPlH+,Lޛ+Ҭ^f4WYtT<ĶvT3m7I"3s2xAB[Qib؁RkiH ڐǤ=ƭIr,G.tts )q#{O_UWp=޻fD &~n5 wz$vSN9Qqڼ;mT>WM{ 2U s Gq65UM Ys9:9x,nBiwPDTuɈʐ\%ͽ[iSN!EIh=%$T~}Dv& DG]6`X(]Xhq8j#&nHTu컰ԍSA@SC9!Ք;:;VG]\3mQ_k:ζ 3Bv>]urSN'Dvf+u\N!yŸ--`XH)jmjy+$EZtO?2 wy"-KJaHPuqTj-2]iYwBLk sN w(3UG>Sc&qbٷSB+ WxQgV>cBzKEv,Z#i[ya @iu 6P)¨嶂OT5҆LPm*Sna󥥝B\Z$eWZrm!@0X' i\No~jyI'ӄy%&OjrzvuV%[ɪ~9y`aM_?īo~Ͼ7>Zv"RLiċE?=]?djGyJNLgɥL>w9=g#eamوêtrϜӟ?drQ$zh?>\s:f_ʹ1xrO'OL%6-MjҤ*ѩ|ӤS&l/rk#iyR[=󅮩mºf[i6gj@rgl֛z RԳNm3 hhwB/_֮ 9实d!1q%(5NdCDI 02bXPw>8M5 OOM5 4P;kg{g< @BDtʽfZqc* ͂Ȅc $x: S@}^Lp7ǃ'{lx%Dv׬{>QcZU-9JH:>y9(S".҈*XYYK lh-߉xiا[w_E\E\"e%@,{gd,bx#/akaߩ>ElO| 4R?;xP6:촁yy8Z2FղH4*1;JeDpR0tъx)*b'hiRI}1AM6ۤnۓK eDHuaؼRZP&,Y[` cpDU:Q/_n G>Eю} 2J '}`0VG %$i9d# 12f7jd' t}~~oijh{hh Dcz) ǜA@0Gv",r@uǽpp*j3N" &H%6J[|p St4XOY K_m5>zb9ˀ~]h":{ i"&l"p{"p< q!`xh -@tikDG?󐁤`ӽM{NZ9Ѓɞ-@Vqn" Op~ZA$wkm瓈6S<=YJwA7V'B\%zC` x,"=q'RFNC8>šNjqjqbjԬP G}Q ^*E&,jYxOw/?V=^M9X+FSLJ؁W")u^HPO~wzRA 'Ytnr ~=_N$vVC "ΒEwynD6:ڈ??g׎j@GH҄r~~> E'~F5+FY|RpT:+8.r;C99;(jy4NV ۵-*>5 q28έ|<:,A8 &'!'i:{s''jB@=) vvpP]=}c,,, 2mr<@/˯VthNZӎTuὗ:a:9W99=}}>xoŖc'u[Ss uQ'ҁ;==yv4wr$G'?ptmћ $/;00 \gH ~tOkp*4~?~,źG1~‡sh8_F/K2Q)Ter+>FмQ 0(ǎc6,VU{ p}\$i d}YR;k|oQw_}: {f8)VGF.|9|TKۓ˒]#͞cԥu/?B-?:cz]~$Gp<~8SiƊ$:|8~|qsnu!kAc]ͿttNG+_Q4 lFsJ:PO+̓. +2LS|KW~CSn9M.֓Vyb{f}=f9999-̥xw^R9]KE`\kKU+C>X>In!C%uey0 A eEȁUђZ93'82F ՞neTP/4ꨉ^r|%rtm[^)TW{]]tnjcH TXr, aRJilDi@,"8^6s&uuoo_.d.Jqw<l}u… O=v9&0!ٛ#_T\_>!Gqp~qdrw3󷁄z?'1!3=oOzd%H׋r8xK)/^`nz{OKjC܉/zޙE9p ,/va/~y8}) T,qGqlr^) x'+([0KTg6_SDYΉ}2jMX{c]ݤ,= [ b4@$!?h4^@S:yQk! }tu" ) `$<,4DZLZ[| hx8.f٩/HqڙߛR_lFg%A !."1-S˭6-jAlJ2ɣeQũϮ"f.T-6WE'Eξ._^?7UH611bjc zvXڲ!jF$,̮ZC`?A5RRNrR@Σ]@ۭJ4큚׹o_߼}(~Q(vhZ)Vg:MB#r_e fM&#`bUF?I =4ąD5yO`Qs\K`FC:w`{a>[Ժ(KËϭ78/ M4.^):؋Yѐ^/S%)L +Kx$]A$p&3 !B&.DHs XE]X3VS J5 X{fsb5Giw좇/i*WBR|QQ^ayUXĴUƤa2QXa2jQ)qXIq1p9RqwdёߎIbIU,ܠC!Jga /an+XwxM,0:X`<,a8q\K )Z?s?YOopd\ ;h_P4\ HjoӪVn P G#e $}v{"䐜hRW$_IS8mBj*_[4ED%۾B_)o9gC%Dv.1=QI7/|nSH.ڠOg>ep Go-(̢@r(au@*7,8e}>7OFR.ϐ$ +O.fNN.ř5 ]: Ew1\"8=C6Uårӎ\j hwOi\=bʋ TJxGR#,|A{18a!.-!FݷgoI>j[0"B:M I)l47)\|2f'chV0,SRItzD.@-QK44Nx[u)MWVX$yą+5+m9Sox˳-ֶ$GJs6B1t8#L9R+,{4bp]ٲ>dQJ‡D=)Bpr篔79ʗ|W=K7}{gfe)!ݍѐT4?p:%1lk4, ЮxKK4OT8e~"IXi位c#TkS@_NRW ]B_岒5O 8y wI|I>|,Jqyʠ`C65󹥩-j|!M9Z∀osrڄh+-F.tHg[j>z>K̡&$Ε :.ncT~)vj+ lSpoc&~| ;[sq> n5E|}$x>!MJH` I@}* 8c޹EdzFݕ[3,4osY/JjW+BK٧H`:'Y}`#;LW޻s*#>+p%S{{$^02"pAD娔[AuT2kR`l *y+,ޝ]R<~lH Q4 iO(K~7IA P2 \l% V7diz*e*`\GFB TD_j L-SY+Ċ}rbBo+-[tp/'4`LN1=9"}1`/ tp`/oM$ O_[O;zOvXR(0J',g`33ڬT)m6J#kk@I1'V޽&P/ "WʮzKrÌؼ\ɰ1 M.uL Sk, gŖJ}id+v J`Z!lYEW2Ͱ8 z;'&swъ/zx@3d|Wd+08@pjWo7Џ^޼ھ`8V6>|}|퓦:AA; x%[%A6*Q3{U- #ubfC䌦(踋KB0/]ݤ>s̤GȊ cΙ a{8e N;܁YwMN>_^i ʶGE9/d]l]{C.o7w| ,JPFN{g?RH19:~B1jP޻wnQpr3sV_ņ{YPk3tݠg%[JL>C&O9{55|zVcwLG Ϯ].{1 }$2 w?q,2BQ ai-S].gI?] 9 HԏSC"))G E TтȉFX^$+h-nR&2bЊ3yzL|O5yZLpCJq&"[ Qφ"'tuR/dwsܢvXr֩Ww#I'k8-ܿPX*%%kxzHtx1+1R9bC's0?RosvR:zguPr0 w ɱkh}T*EdRAyqϘaW $hu>{wv$-yw4< i%VJY|u|zlaL % Z24ͽX&pJq 蚣d}CWYQ~Wkn]-{ ygܽ,RkF$O&3 riGRݝ r5$-gt+FLTbXGr>ԓ_k"9w[d9дrQչr/ WvavdD^8P7?2Y$[D3V^_Nw=M6Zqs? 
k ^}/ @͏û?Sg^( \ _)a~XljAy4(.UG AJ\Xe4pכmb\;z[*RLMrNՙ9[̺@Ȓ#dNd[ԅhiq&r"ݍU q/:Ƭdu%D;kx+F?UȺF'Y[P3ݒ$alcFgOOxkIJ*7 j&ypBUMҼ`ƛafAƐ5JpTfR Zz ٽ3HWHd[5*^چ5^Z{p){5 ŝk>qܢvv}[ڵ z#?VP.tq_g¸ہ3~>ϧלö|Uvp@`s7f32q+hƚ908M5ASpJY)[Y +Kl>E]ܼ0wn?r: }?}4H-6tsaͿyeM;:}TC1,_ͭgtsrdQjS}1X< rι& L>Q`ap |/~MO~gwa\ d}4Cb g.7 eGCqa$ |6+L]d󢑘Hb.?:9vj,.$>͍]Ù]lvw7> uƳ?L3|~( nά}[o+,nnw4}0Nq\=}m>Jvq1]Z0R(srI['H9]KTKۻC|);|rmK@il91֝c=B" I;f[b HGf {gOv,Zܑ$pYaT<9 ξ/`mh`0zXxR>&0_ɆRr!L,KQ4خ+-xۢ7RDp/Y|H)Z9ƾ] !EwA|:Jy*`d`',!DI%h ՜aTO ń \r_A83/?&Y&L؟#t.Bw"t.Bw]==!g!]I#! 33:P( RpB_|\bu50锏zѨ@WˮU#g,G/rQqkJ>X7]eq\鸸df2˾(f:w[<$GD B|s;}i"8,VUvήh$n,$)0Xig_Ʀ&W"y07Wb|XYĉvɬ D;ީ^#D<]Jek̾#ݿ֥2:̤.ZVL:vPFH__d|Kpc`/N6S*n޾O2c` HPap?W{Jŭ37S2(iqeLf$ @͏~e˽Smg,`}9kxzx_ )H| K}G18~ d .?Ѻ a)AϬ;aqg:8>[=8XG09?ס!ڒΚ.x)4&zo;qr!< ^v%fð01&T(KWoJpڥ!Jsqy\S_~~oܭU$FW/q^ht[wb}7{bC0 u!߃8!C~ן==C Hs0^$xF]ewd޶p,)_Yselg%@VޤZ|* %B`NOt1 oך:xڽ30G[-zvŚCѢ0v,M[qN:U`]Xbm's;usrܹ)o͢.Rw7zƢSPDBJګC!Tu-tۯ1u4-7Ե`B][1gtQ>gНºs8_1ډѷ` Heێgac.EgQERAIo֬!t^}G)/f[ѺYsճOE0iSL qGGfP1B )ϠaReZR%ܩhG .@i_>%7jbuƞ:qZf2<#TxŶO%St[CX91ݑs]um¿d٣>t-Z::]ZHbz1`q5K*Iji %a6Vk&RS{>Y 8:ՕZaiҒIeF8Zi2!ÔlVlIrp&ORȓ<9FG~}0K*)$.&ę `8wL{z`FQ=uVS/#]_7h|t(כ/)Sfg*}|ԹbtvVeLp&%h~/@ f0/h,~/nQx?$blS.D tb) Kc[SޜZd0i.]32:A!0P 6<4ՀKdp`A*Fq 8c8`.3P &y8F"A z(E cH` '`̟!Ƣ{nz yMRu)"^Rߵ$W vK1R-1p"Y tؑ>Fi;}z8S[J6l0D{f#An嵟Vl u*]x- hV{ v.B}v= :|ncBZ|1{ptPDZpS@ڥ3$au"|\Sw#b+)gSB"{iz@/Mq?aC_3wlO]/"'E}2=Zu2D4[H)q-4:"Z=R",Yfa`kZu2 kRkXTQp_røt:Y/qf^~LR(Ln'@Q/Y_;Sel>%&v6X,'HILs؉iK<+ H[EiHW>P_]>vOoFT5jdgc@oZq2^` ~4R [_/7/G;DY3 BdBJݘ`C0K4Ƴ鼏 3^:nw-*X g28_5E?ojMm3*۪[P=|}þc OsJ wkO<>'1E( $ IŭI EqdInF#PVկ4'" dGp1Y$j0Ţ,B Ю&}:#i$䄳M8Жd_{?@FK N8KP %0`T,ɡ18{(덅& QJ)̬͕ЊJ _ė~@V2}טbXBh%9!Ku'BY STJ-KSx^؟/ng;mbs oqkkC2Wp^&OcFHũi҃{NljRÕw6X.8㨭[͓IOQ "hg&Gњ $1ъӐT*c9+8N3LJquU+ <ź|WpY+k^U| 1F/) ן~^?Lݫt<GY1ZٻmfW41-'q44oi:^@ uH9q=gABɒEY,m峸O``3}rrK(Wrɳr$r Q6SA4 {qskjCGPO^X:D}vKp/TQ8zl%]eP+aۮz.0Ae$b*ޥ]˗'!(V\pf`mK9A/ a@\))L25YQρKv &'|F8j slQS9)S+»4{^q4ٶimdۦɶM6Ɖ*%1>; *HI1 =c97 kF*?` +JrĜm8F|0hsUQUWwm2^Abw7 r_qK ",*n9DS˧*D|Gف鑃K@EJY~`]pcv@QoɲU i>ep\!%{CG (?^!(y/ejz,SUUBԿH ӜK 2l:+|&(v~ *s_JAVp%ԾL,"̀{ ESKT2W#iE]aݮMEĥzp3Eճ}HcZZO퍑fLV3pԮzK?K8׀ekm#Q5vW]Yۂy%Q $[|X5q 2GZT{r|-T 4Og^s (q0LkC!%kh Vy% p/uJ;ψ](}0l6i!(0l[Ǿ5snH.Nv ͓9ȨU^4 mY csLovk;H+9zpǂ.J=JGJ06KDX-R^%I<+ڤ[}k##=FgqfҢJP=u{qQ]"Z\=);"6cQ&wTftip1tSJJ:CRWbGScSG(E(z]cljW!%mKp#OC,6U3ۊTH5_%r [BbxȞϻ_;ʅ4W.g뻢n,~":^m=\-b8ie *⮍&ZQP"l"]70svs-[f8}!{7!5-ոHd7MV$#0DNK$-tt/aiօ?~P[2׏IĶ6&Rb;)1_?\ %]_0h`JBv@*ͱ+_*L|pdmA[Տm po ,8^l#-h.آ ,H4!ƽd"%[og|Е|x `uIr )M 9XbSR2})4 a_4g1&20(f=E x&J?6B odSᄋԵ#TJ#>)(-;WƎC͇͂+r@p|92[#V$(fĶ^."F Dbr;؆=qbOxxb}6N;I6_8?pR'O:}bcke$sfMKkvvU"ֲ~d ^Kj1k&N%r@1u & :Cc*FcwzHw(~Ή Gwy<;7R#2(o95ӻ&I8}hƋK==-? Lx__73 0y? 
ֻ>cm^H&)֋Q_ݞF^[:}oxax ە3޼{g#Mۨwpl}] ~>'sg3M8iQobω2Կ*^a4H^ꏰyॽ~wp`p:"Ez4O0\ vm6pO8=-SyqV@gCKv}k Wn4W8wCnmqESA q:i4k:54S{>ri,ǒC`nVV 1/gvqfoaIkm>A6=18SyF) jl}RLU(x`t7KB#d٤4qҬߟ-^o:fWgm囷D#zuvg^dwґ:yn;i咾RCtzqH/thR;->վ;gn47#4ůfr/ՑmAKrzXu0I;pcgϙ/;RvXv Ua3a>;{%nt~fVyәĹ0TڠĄ1 f<*ҢldY/[&|~9|(x^3Do6%J}kZhx IQ4a:)`' O*t_E6K:[[CN[0]PS4O?K擗09 [峱;[ruLte`"}GPrK;s',K']f=8V81Q!9xՒ np-.VLj(gwC 5C~c2wAM͏Om gJ%Kɻ5=›OذA1_[cr5x}3\QsrJ1X [dCOf>8%&_'zecr- ΕwgTop/0:WtK +\p&3J8Kaٻ6nUA*|1?8 &Ax\a,ɥ"94W?]Zsڵ Irg 3f_T>נ?!ա1nq7,HL#HnjZ55 ЅVbf;R-ߠA7%dhZhлN7 ء?=?*A=E<5۶H~fBwB`3ww|3siHގץқ\g>-,~g^o݄'.~<ū}[ųSwqoGw×AnE=x|}KfidʄT$)XfSjK^U6|0m7@j3K#xH`I`EkM !{w  aHkVgWxp~-o\J Z8g2%S'I߃ x >I+=@@+O@g ̶&䖆e>(i~őE^'o65SN'-i|=K0> ?%yB5^ގrܤO'y2nu`==O&m!!8|cP-y]OiU9hg|ANd"T4v}N|MuY4`KKFmtzSiL&S_1W֊n!bQ=c{ AͬG4I\:)'>^^ž{XqPJ m*K*ՊW&V@~ 'O^P\#_$GA|τlgYOͦעg Lޘdm-lد7ʓz`Fxh(/Q[kDJ'F%4aЄ1EL(A, у@շX9C1?y'Í[ę| -iuj=#W74Sܿ5J#C2v =\ltL 8WWwȞ&f9~=s=Hِ[:O-m,A>d$k?e2prD Kr0mS `n>PC \(ρur`hx`7}o7,4h+7Bo<Ÿ [%Vb:7a-%L!tI 緫Th;`|^rT=?fs|[$8=>C؞ ˷|~|U3. s:/}ĥ-jîᬬʯdOGS6c#p=n~CMH@ 6Ԉ5Ĩ o_y.aZ;I}f?""+j9H^I^E0D)LV0BxN7 ^̄#2\ß eB6:=㊪ep{h r# kiQC.tΑHHH_*s&_J@:(Te'6Qc4N W] A'g8?.fͨlk4X#+;4&"Y.S5鬉*~wO:<;ܓrtsY.fo#-%R!Pky&}݂! h44ނSnʑ]ِM{3bE'51+m|`zy7o~䢊51<μwNdz03?Άicߢ7_ߍ?Trϫa*PO={7}&}_)(,ucx]ԸdSƔ|70 I)^#Qo.]N_9Nxk{pWǿ`@gr*6 JK7Ey~IЯP\`bdw_$<0fJ0cELT]dܓ F0ֱ ES. k-BhAQgSy,^ 2Ϥϐ$*Ѭ[xBN 'l:s: [] {Y项^ծDhfݎA8X0_ ;9@1Su`vຝWY!@vL;Kg8ȪmDž.|chL ^ذa3a pv<| zÇ7 /'*P߿\Y6^?q&w>(k>͟tjkY z혪Nƈ2qa ;{ܓyƔ1}H׼&ɥ"{#Q4^}$5978JWyFle6(@+3G,9~tF)e vO|{قEsBո] M?v^ O=y.~]'?tryRX)w|Pֲiry¥)$%fEt(!;QS.)[ƆAgmP071S&:6fhkկ`24IS7I f\7ͤ]ߵ<[ݺu=z8zGsTRM9z Ѣur0(BjR{yP*yo}*iڹlh8rW{qZ?=9l(L+J-{l$:'*ʥ!B;q.aT QBJ0LbRʔKEFe:L)<#.4O p+*bD9RVak搲V&kUe]T?XFQA11n:-@ S( \Y^cR u?n6LkrH]tQx/ C`2,b^w7׽swuS a 곓<eNûczrXʡ8#~~< # 4q.U]qnngKf>y1j 5`99'"vxqR]Nxxh+n|~b@-?(MKP324$8r^0%{t) +<#oi:*j5$p(yơ3*xcT(,;\fgM(^,:HVF V睟n}L(϶/J0-L\&Uݷo;Q7[Rڐf9UC|8ZF\k㬈F|lC~--|{ݛGQTX4HBP8r_"R[ , zȈGEqhzhqrCig'͝4NbEmun}q5~6tC.3OcUȦcHU5vT!)LFJcNhODBfħ"˔"[ Z2nxR\t q5wnZ\@i֐n blRol-̬oW4)mזԘO}<9t?͜u"3!)+)ge,cQ[(L8@=`N dʉDypF.kgAq ~U(Ha? @m%[R5^@sTTaJJ9UZbQ ^ajM=.BExr7Z3`*Sav$Ti*tVjr[.u<]j~{ 8Mm䂋d5_*tSXj }/}tNxx V0Fu %ershrN+Ӽ&SB/J (ɠxX.CYm_Dz5g9zeI2WVۘVj>zzqVAgk`fmm%WP޶חت/Q(hKޟҕ#q^ǽU3UV+T+n]ЭuB.ʡ[B `#0bH/f8pS'0}*Bd{uTkG3RexеS 6ЈHax1˽&dHIMl$1Q!b,V Pǥ$U!dH\ٻFn$Wr-b`>,p!adal~[YK~EIcֈIkŧU |K?D{Jg˞.a-b ib#<$F~BS'']\Af(<;Ƴ՜HejRpgMmi yj`AMMBmA Ӝ 'l y8'y݌ancϏkڐqq?g Mġ]'*c2m*qN@k!-G;#yH^VGJadW!z8'rtpl'[)gaAGAAs{=oJI]p Ե# `aGh"`I)%HI_x+= js<" Skű3e5hcIvJfzK g "D`S{A5CA;# 8ݺm:A/ZRoE!W@3{ Z$Ifgv:zL1_L:*P1I876 bŒ:q FC gI &z!x?S[Eu4Z@)ٚcwHrHWNez@[<',T8T:Te5"ӹVym:3iU-&9 p> z=cZg=d6@rS"b0(€cg"EUwcS"ǍNә3.ʥat9\7esX!6(̀, i+|=g:07 IB;嘛'L>jdzv&9w1Rޮ:)FݳeZFϥ׼w4g[/o}f]P>s"+oP.<{Ńi?]7j㓫՝_.3yxtqӻOK f&_>5\E}o.'.y op_au8^)2qx+֕cԇw_^_SL4ɓt  HsM[I~]9=~Y2a [B[pƩWˊd%Wc> ᵵuu*z6&T8i(#.ԵǓ/hj tUb\!,MN{xm%!X= P#';@gst,@택 h;hq)6 "1 ^9!>vˮSOA *Ftl@Vm#DLjShSS@h8e#iWbRo0٨Rȡ5IfʬFosX4`fGQSŸIU{'% 2Ze( v8ƑJHV0ôfŭtxJ1ܦ۸l}`I$Nj|+oV&8E E5p Q=@ZY 2VN"oEJg 3 C /TmpǫY,ʪ]u:+yyK3q8-!wl~;mTv+a5hbП>ؓ3 gNKX;h"t2:]`Yf1I> ۱N|i'.'3(A*dZǘWC?ku'v]filٙPZ;mf7(j=#Q\R҉! 
E)^* %zY0z:Gmخ9E/=gp'8ι3j(y7|5q@h185*VXyihB #8A}Sc2b05YqCM]v(FΊChީaX :Ҳ:p߬t;gw0.xՖj\!/l1qnʀr캨FmC3rB%=W$sCpD%v" 7⥘w gZ0_˥9I_|且.6׽ TcXj]zEuurpI~\>)4i=y1jp n0o]L%yO%Wż;Cfo4#1)2m&>Ev}d>g,?_ %`yR,wkP=.?9@Fc;#5M0T/aY-`|l~N[.˭yyX<:ra%X8KFqOE}/ mb^rN ohs\YvEq?Qm`#0R *# uBB BY{lFm[Mnm B-Y%+gcV渋myzl6eH++&~[=lx`{,Vɩf_Ur>UOx2TvE:ޤ;INtZ #+w⧬笯L^W-ST+9u$,KK)* :f.Y;׀J#NNu8:-&Z}zMUMURb#s(94q#~^ 2F9,#㱆 ;z\ܸHl5m4ד?x?8C'f^ԼdzҸԼ=E#5rWF?ώFj쑻T@竺9zUmY +v nIORlHMϸNG+cDC[=E:XG8 FVP4AcjI5F NBA$\j(C\2QQMlO,=>3vwRS>>cG#+&RZh03w Zhs ¨iI;궚r!c X+ x [cؒW(̮3lBL-z$|>b:v>SdZuؔqysqP6ɜˮuE^wfûC\1#_)cIi#?D)Ouo*0+8}\ gjr^dWu^<%>/_ߒ!8OjW<ѤL)Wm#y7C|*S\P{nZ e$w60!߸nT! Lh! _|vu*OO0ofWD?߲w,~֑Ş=p{ n/vᅯ_\"~Kį%+rj_G#Bc+`+%m">`}@K|8y{/͗NrFXoyRpOGmpҦ_׃o~yrv95?G6F>ƇWՇR<E@c8dΥtL\/~߿6?rx;L_w׋a6 |  sk_iOz@kF;+cg|dzȜ{xӘ~ST-&|op`V7&,U5hU1Jl:U4QIORjԒ;uVNh~ř'Z"[`uZkE}?VۊmtFL37RP(8q# iK0Ki6 ;ԹFBuNd sJ;9hU; ė>:Rjέ"ιȡCI]z+h+N{- jc ( (֖lR>I5no7kToثRN(f -2\HRpf\CfQI铋r7 Qs5݅z3)~=pm› UggJ9k$-Z𝔄BG[+m<;rq|T¼=ɍǎ̘lx {I!l:W+tʛw7痖O˯?xQ_~>/o֗`r}^YgQ'K.ts ]\誝 _'5 DB0(XŘ! /4xC=||7~veA (E9ʭyF~*L3=h8T\l8ͿFqxq&F/ki'G_-+yFJKHox&*2PIy(Ls=穼ޚ(5M]$'Jx :E A8ktxB-mX;=yi:*!(N9 xPx yRࢷV9c F#D2u4nԣWP3@˘)Ma.4FjA;y h4_Fm sQpk%w@ >`R*9A<cI`k奏N+9jC#_7HEog5m=V@œߡ"W$y~~ՠQUt]+ReQ7腷6j=jBh9Z}h~+YWͨ3`1xxꪒVMoĊy"8Ctw8ouz ȁ!M2a;?b'˙e>i^bgTqֿ<DcPWM3zq!{Ym뽤,W lLﺝzRU; ԉ-{N[]Un Zur' s `{\f7}|ĝe&eUt'ma ˦ncJf[h)7Ҡ~mEӵ),Ps9QY8.h%[ yޝj" n_e 2jin+|%6ŹS   #(CAJ"OR+x]Rn9Un903l ~A,TqOlX;|E-_JI*U3خ8ݧazO"8!Qk֙')ۨyp "INKGV| &X ]ciZBr%1YaW!&C\  E/)w$ W˟kؕkkk;QlF?NװYJ&'g^}E6f|X\y`2:bBH^AvKjp;!#oڻϬ8f9xJҔ9-c|#- I+s&oh/ܴ=YffϜGƆ=QQG$?,sȃe$YIFa5+˗E%R1Q/RrmO-IAf$$)PF^ }b'@qQw׳ZS7ߎDU;y#mل~ZQ 0#8ޣ9dVzo܎L747Uhnԇ93tIsTUí])<=E'cxid |I&Pi+!WL2S.3HIriv_O-fp# }n]qBɯL KϕX^l=>7d Ʒӊ>?՘؀`??͛A%BxHI ~6w0THdhMcL XԅO~|TL`>~ߜ–PɕQeq+= 16rs' 0@YE)0+HEH=qg28/e2x?vp9ŝT\^:3)YҽLߒ0+ $Frą %}tabFah10 oT:)9qQ*ׄAI[C4Ujކ0מsܜDl/eCXfJR``8DK>DDT +(¤"vE=6*A9d0bm1/ a@!c[+ad>uFKRg.#ߚrJe<98t) fm h59+W|=nZiw@>lѓ>Ci1 yG7}u!Wro|s|T1Ƴg,Yׅ܈Anb,KTe"6':)DNM M~hooF34Cqnǽi.o?F$|ͧ˫0}0яW'x {d.,C{fa]f5,xbaÝfP&J>zq=q9j6W4ѝlΟ޻\O8j'b=+d4QQvbrSYI@nGF*[M-ST3L/,)66Z3ח)Lq' +R6׼:~Ɏ"py#2ͧYآ99|b|)!%6xaě 6io fD=C'~M9܀}/rIi 1t"H(.(vqgAP|TFP`B<'"JS* 8\( ] oi>f|x|󨢋v[ fnj&_k9 l4oKcciЇ+pUBwijCx "`F+7R 됨AnCX|(r]T(ÇDf(Ԯ:/ =!*@i. 
nډ5D $T iABJ Zc FNGV+L Y<63]*@@B㍍%= ͦFUiki9{<5LR҂-YK /xI &;x9 F&aPԞD0+g\HfƂ8`_\  :Ȳ `gh 9Yb1QdH04LB$&f`J!Xr%Sgm FDXT2x RZb@Hi;g">҆z:%5BK{(l,`X (D "y*/bwD#Bje(;o-cpUW8_n, ٱ{݁ju]> xCKsᄳ\cw.- m!CD[p tTɤ90.|B!W1ƌFG*os]|||I>D$5%mXe?JbҜ%1@e11u[1@'A~`sȚhYZYlӜnEZ`!pSK.M/lڒ%2U( !`bxհsj]!4C |sąP ;,ԏfco `LP XC DQ'g ayQuo~hYZx(rl9Jmypu p28yNō+>dHRN!1˘ {N PXR[ӹ3'-~|e6Up}(:3ޤ i0tv8d;lhWrN`<]4Ib3&jo_E&QЃOeE&YW[1K8ɨNh JGXQY ǮRU&HP6Lv1&-6{I(Uͭ[ڐS*Z=օA̴q#\< J0"O8%#k^ܩQ{߸D\'KKaX{䄎J%PQF-?T3jf!pDu䴸̯bKGOƴD{i-zLFT=?~pEe{K\JvTVFw8L}Ӎkqo4uKٗ`6ܝs*p%UcB#X)U&&bϨfiEQq]o#7W ְd,f~dȎ$;.ope}df,bl@$<1N ح̬̓8%/}:LҚ\6(cdϼW%ư3ȼ#(5ohm*aλhAiUoy-+QLJ#=^@)ZH QJלf3 9)XBj3`)Փ1֑C6)9\ %*jcPĮ+KVrꨲ(@EsWզqU 4NN:sI'w*a,(wN[!"HqA-BNuAN8: (SiS9kp-Z\F Ja|̗|qk8]Z&£ׁK0r>bJmLm|Mj'%" AL"_XPZDbhN-odRe#*3^mnYE[3XmMǕH/_))"v{̀EoSM>j?]]wT=thN Sh.S#B.Lb_#ܮ~6#E.bݯƄ .c*CC@y(ܐ'+p-UCQу`?o64ĐE2hmmcR(][@yRDsf@C(7Z2 CҒoFK5$gǘQНu%#5DJ$SC'I "uF śxpoMu&a'X- ੧h9:THRp脥Rh#1s 3HO(nv -_\ӘXh]I PSvěٶZJMP\+Ψl@{4(ͷkҧ0OR'Ei3HP%Fa teL]IxPiT)ANeět2%bUMyTREG'FO5=yx#u R:Sf]A);c]i}ލ0Mkؕί$&=lHN^OSn$}7j9Gjkָ=܅,%/SK jS`8v ;(J %"Wƻ<֑ϭLx0b4gkUpօ.FW`%f<븺ecYWi8@I5ڜr *#!h 5$ч 4!eV}篊混\ Ury**Tm_>0(wI4 >aǕ\\xՂ (;-a4Y,3[uYqWw_[\NjЛ_ױu,u |]W |Uϡ/rdʏCJ<,ƩϬx^qٺ"O_w2Mu X+1`ӥe`ʋO.Ѫ|;{IF+85׫X2bd4%p'ݔzOj뺳˛wvn:q֯~'OM&sO)wsnCy:@j6!w 6YRBWaw,uxe:S)J-D Bڥ( &w4q$'i0I# EZµPT6lWȵuuݒR3ҟ kZϮЖ}9%-iDž 0 |п^Og"ىwl2ESQIMը䔨 o'< c}h0CD}_:[oo-~ȇe^׻Snי}I;!wn_m9~n% wB zZDMg"I(d:88a5zp>NFW 58k}A]+.".Oe2~X787:h5!J;wv ܬwkcsIC߄/WH,;jL\w7N71ǐe~7Q3/><耔@v[d~dDDhƏV5G:P5\)< Y Fe rڿ"syDDZt:5߇t3C /؉g[nlΊgY ZL Hw4E ?j3 r3LXyi\+q տ6$-+ `Kr2 tHN "qQ6$gpߒU0帚Z_(GNfFmJh3&Qm]I_>QZ;G3>h-,~%9 K3EE2Q zxQU-d9ԙ Ssh76.E,;kM #e(%|D %u=bAm9 m[&vWJ4A({Su7NU _g%ONXMk[cv~(ƺuɢbTNkɨ3]sYOfu^cT=N-ȗRҗyur.1~s%BvOJ91|֪աw<ozlEٟO<鏩lをb~X`yA--x'\N*p1hjCV-R8֪rBswK1k5&( >CLPՂANx.2[_HKU2MG#~ffVQRNm0L3U_QSJ~8Y1X fhzU~Y!5b(J~y8x<|>&  r9x6@x"d6/t!O=p:t 6>;TJQhByͧD^ M~&Z j"@!eTNcy3* o&g(IhZd_FaBq;$Uu4%Q~Bo!,HJ'Nzm~.ݹC%2gk(s;&A O8_);zj%jg [gS$ <[ 5b&PBI (¤2 >g*a@rj8]%R(yeZo54pQ6vj8/JBT-G<"b}dGq㤤AuLhmY}B \^r/(//zek^%K ^!/_ 2/A/1zKZp[\p5c=7TBN6APJ-< nZ8LWVŕ".*5t >'k)Ԃv$锩v =[|a *)㯤bAX`L:q76/*:O[Z{7zThyqlrxp1]wx)DC[9Q5v δTi[0/.ktX7˜|2"OG{N@ @ʸA5 8=n%/GDϪv{+qcob%inOIʥoA K %Bx\T+ (L:h* AT |:İ'\h #}eDzʂ++b͸4ӂk`SВ.0B 㜥ƻ%'EѴܜEk˥FTQrg%eCr H$jIfvf13X;Ɨn{(R˜(_H /1f@m:ilxk =WY塜Kf jh[72jVnћٟU jB2W^]xKdd.K-UM5ט=#EweX#T di %'RwZVH{m auX_dC{>'Y%ƕ6sztP@r F&+y.)H]d"XNXTUr*]!$wrџIh.rNY#,wrߎi\ F <D W?5=~JihhMv wǝd-G$W>t|wtȶͦ"_+,/>yحSMJdi%1qȂ C)<12ě,;y3_lp@|"Lڨ%_ĥq@|-xMm1,Űv2xХXxI{pfڶ|IuXZ @8_S+p3 &uJ.u8OW^D8 Fu1z,[bT*Тh >NzI"J2C -R1c!.Kx>Ta($ LsWש{$]G.Ϥ3NnNʲpP87=wP^ΫW]C+T-% 9!!%KAV!0REFx1+[_CVY^KaA"h: : +c1 EY(@5. ᴷ.\U-Ԩ)U9*F24F+íq߳\kgWK FsGC䞕TzLe^0ӘF!Gft\x>fl2X5 `0~|&No{la~0F5~A=\񧟳.p]^7Il:Gy^]vwWo~~חGHç+|0Mǟ_o#5WCpp=ٽ'K\Io2.3sM]R,.wgg1n?σEx:7Z׋&+ v StkFтk۔,|fI"Kxq`A pcyT̂'?)rsNEN!,(i4Fz;NrICOWec'f&OŠRO$ l٣dv)ªn dUM+0kxOjќrx(S>x濷ܞX@ o=3HXP0) BV Ԁρ#ŠMB|FmM9xG74!JJ%5ݞDєh&REȉ>Ae=K9`R9ɬE|AײaUZJsn{(cBH+-\ Olkpa*R +X1,qKg$+Wߏ/$ ~wgIԩw!L_FfqzAa];uֵSXy>Yse}pj 3VF[ir?-u 1yDt0"@ zɵsJ+HN)DI*,̮̰BcJLj Y[ 0Ty\.xPwl{akSGHڛoQ"k8լ)?0C|T.aLNP}!ːsŗal`\` }*:Y%aW!յVR;v@_ԮG8Br1g;·T{C&< 2$ukR C\)ĭy0%Pj.m-G*Li=Ԓ|ЪonSo*,)[Q@n8[{grPg͕BK|ݬaPŠ0rP],jYX,;;GhJL[Y!.W \/7yjŪ/6Tl oD)«:-;IX̼rW J h:>p=36"& (Ej+("Ǣ!Qg f9⇧8L0'8L@<:DGG$W=9O{NvnK9Zb-K2R4Y2&=OW<ҢEҰa-/4 µʌQ tm@47l A@Q>w(&mݺNF-iaqTi$Hei1G:I. 
-"IOdC%cFO`W]৺WB "vLNC,DۋǰU".Ўx^]xqiLcVجB%3jkѳ(Ki [wnx`\.vHܪP6gUsbQJt5v{=UsŠ ahp$hb _F jH+-U22,ٛc/BUf=G؋5t~|&LX B"iHFN?[^%{`g hjc?;і'!YƂSΙ&PF``Y@0#TZ#_p;g8ȝ5.ƸF=w\@t6Zj 凧6k*HVpEzR}d ;.ir[et+/ΖaW(rQ/{wSt4;[id@)OTJqG>/dZ+B@DsMDa4bØ U\b9DET(pp/Sf , |~0;^./C\zc\W"HI.0kt,aTfٻ6ndWXz9gи*?lgTśq)0،u I9qROII# MRbiht7Dsem$ siļH49.x\I>33T45$ yc\߸ƅ7QޗGV]u $@iJ@) PL!J$-$UxʉXuwQWG)D-M,1:0rۼ4˒2}׶_mچ8)\6a#s6݇к_t%AP|oLJ7nnDa X:[cst]ꅛq`Og%^\ i»[>7:rAUNZc* -Y\ X^Gge^ Lit5n#!QV{VSgdZh<ɥQ6P֊,Q# ԡ;5-{=ZN.fh5jhE:hT x BQ)orTSك.WXn&Og+?~$7.f˯f0hO'7e'~:VXǛ=-].(oQeK}bQPlj $3ƈʅK<4RPrk*P58lC9SL`DN_knQmP/=WŚ\j$C?4S]m}פb5t~s= ;ǯ&^~J"ſ׷~ݬ*P(qt)Ps|d2^ث4Txgs$]c8tKW(b;V= ۍ+!R""/a$F5@b3>& /dwJKd=v>suy(@RV퀫W\Sݭmy1UY{?ٹ*]͖I([gu|8gpE|;yg@Aշ^ Y"S3Vy5jd` %puߍ/s…x<ϝ3>*.Bur?M^)t :}wfvyQF@ً@hb)_eBxDhz œUf Zxnq@N?y}L2`7UN5Tv[Yڅ~TQ0_&VH!iڥ&´w+iv_Ûo9Q0v,!]ȝ5Z Yo>ҘsORm 5׉$tH@@ #F9YRΑ)4i0 q-ý Y{n!}B=Ia*UpE}WowN% F٦1 nv|^(dXIڅiٌ8|4V7n,u~ Rbm\ǗVFCĴxֿ" 2F>6'm;u`GH mQ+h0n.3P@8#W}E =#09Ǝ;DS F2!ƨ a2\5 xocfcp 0 +U7HnK٪=mh}0hqXQ/ǀlPoҪ`hck 1zf飍ժ#Kp;UcX)A)X);jN ${_wJ=1p8.bk)LMyWifʻz&"8:iTky$7 Oyf7~͊Lz^Nj/vb^lj^ܬ.^_ߋ`?C ^# %,р"B,v7ߠ8"ŸK?&%9>)Qe,Ɍ'SSJ@V/hGU gbdR)! ;_Ū"|z~i9NГ,f.\v"jͻD.L0ť2jeB3um2D7s+\fXoHKCcXE)2#Yp4 )5d K  Gކ +xa1b͇˝J]H)~frP+qܑ^quf 3SmxF`*MVR jm0}`ԧ)Lqԁ6RIt 3*avmQI*t*t01|{?ߟ;_2dVENtq1K_Q:Ezgnynny< Ϯ)‚I 8DXrO{%WNg'4&W~?/'oiAY@jOd3WK5u)7k3ݞ4_X:(aJ,O٪QK QUY(ddMcZ<`Y@G24OP1)S}<+xHC4^L0`FWc:aAp2tˢa.F;@3''^)>SH)'L&`;υMV__̓El3pmOx8Gu%G h64xtNR`2fN""SВ%ZdhP^d N0D`ssPIsHk4CȽ(fd Vi LkV,HF}jC%EosMv?ᡢQJ0@Do@h 91g!W>(iNt6ՔT`j0IEMV4TZbۖ/E& n`N2I2zq}d $s.QM9YąvUd"uO'skW31:$O t{~O't>9~~͸.H<ЄIBb{S,& cBzfSm"Ԅ͒=x"TlR\̀KIt8`mzYIbRnl#H. RYSQ%A߼&*/gYn$ac٧Sn5ȗ6,I]pI /L(屰cްs/#&!_kRaQ Q Xb=`C r40yNL(巌6J8oG=&5x̎tA/ g;UԚ0޻3P!Tܺ1q Fto ҟCT#gy?ϠJxədAGdH62UOs7>vъx w¥GrGK4 uaCH@^ԛ0s2@PaRm<Pv_yl+EtJPM[G D9U{c \-Y9Q"ͱ}B8kKuD#$' wMh՜:+i T8}\Hegz1;fаRJ_+E?K- X39j ڜPIX)YN'H-kCB>D:;&Ŵ7PTZCDo%0EaN=7#MT;*Zw6sAr;*J ُQ"h1%jnZbS?~BJB7XYLghGSZVA>I<Uh!c= -02,?E_t/5d\' ̊pE 1/xP$8_mۛz%S&~SS*d4=\oWW|aQB/ɔCgU?yD*ӵZ>{)^jqi$5JR)u+>>Qf i?tESƯW;{?nOT YgT,H2ƴ]`R"dYqABm{.b/͆bZ݆a+RYvڮJ#;vBYCcIHy#yhÑD;q2BQHCz=Ce|^ZeU8..mX57CZb<1I  i‰)&Ij =26 F|BBrʘ1P;CÜRaLia MK7Ȥ(8Q !OUPjWfM|ZE`.j<1ZA:_XŪH,f0,7N)-8Y:Ut* eÁnⳙOg۟tzyXVUU/` TKS#rl0N:H* Ҟ$N{ϛ>M0Rz0kԭ߁3gXggs],3{K~wv300`m&.>iOA!-yHK~ҒӒUV"42r =Ia#%X'F$ \;.u $z֭ [H=9a[5IJ$"]ܼKM04=z JJq̒ę?٧:%B\Dil<L[69eTMiBuc1e:i$1kXfG$3 ;/BD1Z'L)kS9#Ъ Ҽoy\cɈQ[zj!dI/}%!Kiƀ$4d{bGfRKX; /.,"Lf C{(j4L?L MC37ɿkĥDʻ,@fj)^x5(Ah-l JiG Қs0HrMAԤOE MVtB0dAd;Pj A(i9urycI,Dp T#ap3Cn5z*r˦A+LoAxG!:iKC0v 9biCHȉ3i ~¢ڮAy揂l)y`0aւ$*Xk4k*8NaeA*fA% $AMa+$76|n!cqcO.x58U\OFKSNx0 : ų$ c̍z|̞?Eu `J?٠77(ʚᛘ *QKDZn`@ð fDy8& Y N M vbj#Z$g9ﴯWJa' ` 'jIKi" xG ixV=zY`4ƆYQ0K%9$YBVäGSPyJUVSfI6"ƢVb̤\_fW;$5̀)@ 8؄<+GҠ?cVVmp{w>Ph!e oKrS 3^S /͹x_Fc4f BUenpQ@a$^J\GEYAiR b'j>5HK׆B=HDjH䑬Z@!t`W)DQ*};r8Zk6 PHY9iG5=.\C*TOp3]T8sUՎ\nC М>`\j<\Eʐ1UFKk(#'W9d5IQ*'v$&$ɽ_##\>1'`&iJ[vg \Y 8̎%$D&MQ%6T-$۲3d:NG%bGJķUJηnlV f. 
Tۿ8X *IixsK-zAZ-g`^Ov|5E\ww`]qmbXt=JA hqwf .xJq|CCߩ49@osVObA{K 9g[,+xp7g㏣eY|Eo&5t8GN!(y>7aC zW`T<qЏΡd j-7 [鸹t܌(DQ))6RZ;$#0M\f.pɘ wؓ1ɸuI#oAx:rLvf;x;q}_̫8M>e:r!vĞx;2QkMi1x;-u 9"DzIW\xQ[T"&=d-V&aܒ 4 )'Q> _˜‡LBLc@wFмt܌U(JQtw0E%f<EFPKnlnƃPTWTWG; KαB"CBkgo`} iUx;[X.>FyqT:A"Ǡ}Z ּ9f}_kp .U} |ԎU18H QY/YG;@E։;@!-7AP3nt~끪o&EÙH+]-׌U$?\MP4޵{WsI~msƚdM`p䥭}ڽ9-{1vhPСB"%wKVj;WQl|jI$*Yjn΁+PѽV#{pƒ+BB#!= lJ(:+2bXW1>`Yk# : W0J[`QRSD`K58\Im,pJAq6 K?{Yx i.k4 {]K#mIqD 5mv.({[gg/Ϊ>'4({jh5uX O< s15w= o5=H>+ڥ<^uaȮc4<=*}z=P8tr $%V!C5HAx85R#G Nʻve@qD4W*OZbZĘیZޤuA\)2qt)oI.00y;i~ZB4*Ƽ|8Z82&sfؙI#d/}N9G(S3A$e=JTm̓,!qTPإ(DTp2U I8ƪF`O{jjn΁"uZ'nϕ pZVB;w ;3]e<펒*K^]1o⭭SdVWYF:rI6=hP֬ɖ]نQ]~a{EP>epv2#ͱnsڮR5z^Qz -Q[nC(fhFCkPSIUeV.NE -D$ipܾyXD[' /{`eC<->0-T~:+nN׮WmQ(- S/h5DWp Ξv_hw"ה(c:"a Tiimi&.7/kkS#tkz`,0x:y(qi 0ɕ ɢr{"'i J? ;a ]NGAr#b73 0"dn:R1 b5ȥBי)(W>{J7t{xk2atC,;"M("inCQz[번Dv(ԴC%J5eۆrS#"dFTd|(PXcVi mPr A8VX]EULbn^}>S v g%TX8Z*EP;{5_ I jeOGӉ ?ӰZyy3P@,\btX?B'/wT|q-}A:Q'O֏fL,AݏftA_>VEKDʻ8~ȁ1 Ut㛆\4:zow-Wuc8g~]?~ ZIbL/]9gӓؚax84癟:}Lp>~z>Lǯ|ݲƗg+ f_ŋ'7o_?yu =+~? o/?<q>5< -OsyO>MBO_VT [̞\~QllPwh{Ş{8:a,xhپPvQіw:08(˓0J,rI=X7* Ivn:~~ ^zn|,.bz~z95 @wdv 6K*j</ch|@h`i]߭LL2fߕ{y5\^xW`pb0z9 ɊGO_x<[=ns6`~2?.;Џ|_r6'J~`r+(Xr<-zffK0@oj'W Z[kahK|RflJ|?WE];8,.Κ]ѽzΗXGȤDbɕKMl~0bl4atzc(v˵,"bʞ 2ǝ`2.Mڿr, Gp:Z V! |ߍ伙FfBLT=#oiO*\D tBc2Zp%U.uR6Vj Pv}g%4krΑ0N[dJ@)gMK!!eZ҃G5U7ںl%uIfkA2zEǞl~dYDJHPT*{$?GE!"2.|dW^]1m )A8`DEu#kX_a"I$ *PRGYX"5"1qMfcT$K) QqoK*W?$I IrS=]kntĶ3ʴ9"z}J7_6p_u:Ӄqf伷Vޑ[I`V(׿H>oŘ5><xf?^׉3v ˩q yw-~{03tՋX+.\X#~]xyylb0&k܍m2Q/IS3e%;lEI~IVPSȬkZDSZfU|:_r~0Yn# DDqxidm @QO81᱊!ȝazSVM%4~g#➸S*CT IN;,3sn7'Hv9e ile9tP\hTԽ7J#xBG3 :wq|28HUOS)B[Jp*9W[3|,嗛-3zKI$z]P\"% KLm#V<8.um= & )q$ P19.{%GShN5(\0U1q"J*Di:($dF%,J$<Ѥܣ|U㩖NMFV8 I]D #0"|TC`Phd'˰ Jx6nze&N*rD}v8TsR #thb2dLi$90)=!( V{pw36Hw7B )\CQ' n'L!M &t,Fi=! P8`V/2qd8b i,9D USFP0!)b8"Ugq铗NWTA~{ ּ~Л5P{ZxR h5O.7@qW4jo.oԹӛfN | îdj ps|?IQs@zjRTBUV³5 ZMʠrM*X%{ IQ֐W0u&E-޸-p)hMBCBXH{;MQ2*uDD1i kʫ8۱[ĩ,jv_+qt\,Ax->PoR[gl* TQ_ƒ^!gQh\?R:\zn2iW wxKE`$BE OL N4H ΰO]F[: mC)Qӵ=v JCEԉ됝E@F뜜q<ŏcE"ԤwHq%_AO l/<_>O`.?Ü3o!a0$9y<swL&3N Fv:VDqbghʾg=P){&C^$r"& y༲涤m˩ a/= ૬@/]/e+J߸+ỤnV#VBxNuJ%Lm ^.՞o-Эy:1L5u,l(GpBb3_Rp{O6 w×>u}\7.{]㪩jb\. >ٸFcLMӽchM;q6.2;W .R.2ek/ QMz) A8$xZ1Ucː QgP,_8U" <}>՜(UWmчQ8&M>y^ڳVGUaYg'%A^jnBz0݆?x% G~ɮ%#vJW̗֝=FN iV^"_g-sHzaQdo=daBk^ֆmmP4$X@ "Q QDmF,ʘ.暎)e#p8P*6t#獠c84ZnF*"5Q^'P *JdR@`t+W3Sأ +ٗ+E0N mE\7-Y_B hkʩ]k0t62! aE,"Z*ÌLB -5wa1 R"\At)'I`LV-F GJl*q&Oon'% 2q$.\9໌krnK'SFP~97ˉ9ptHJ64 t7Ɋv`&K5Zn[k -(dS F ,jh{w#X:OzwHVtyoToŚ- s,'D /]D[Fw&ZEcaPaOY1u5y46?*O\)=?P}>BR%-ɯ3JD3E]K̸7okɅlMwאR!rU]bg&KDaٶWE+Jr1HR ɫ u)I[ZJWRxty+O(i,]WxJ)So>.Z3 ;`,u5 8*%/pa4˕?CE䷽7Rp"ڑM\Hl&ў{Pg)xq,\~}7,e<_rCB[yd'` Opby24N#B{^VOqoa>}ֳy)?WOor$.1y`EVڇ ELTtn50$Ff=b9~ۙs!^~͌|5x?y8k>.O2Op9}J'N);bdQ*q`@72-yw]T-)w?pe>gD0RP,/R1)f(UĝLk ^w,$h}vM|hHPU{I|[qAH=%w+W\U"1ӏwF N/6n"r" E<Ƅ*WtQL8TsR aj šƨHraR*}30ilPd팋VO/C5P>Z\Ɨ cyKqs.@pĒ( |v/J&AbPF: )3"P5m.w%w"5a$?Mvʐk&{!쿫>GQ['T0Gd'\]>i9a K9/}؂3>ÏcFt9ZlOã0P;3]T5Pkw(QL.&2k=(v㶜Ikz>:wh |Kw? g=[sLp[H/xcxQ`vTRxFYtErQ2,>K&g' 8G A~zp9 Z`<\ϧϣjP#-xBjJص45ґ%=d'YzVlv5p}yp)8'j Zre0R\ۍ/_zɚ=xszF[,[?u(&l~yRfw # >A^ WA^q[1hm&)i_ƫ}U Am3xHSP* .UP>CrlQ|\<\_G}?t7t*ͻRq y9E~-ޜR]OL%C5Qw\،8(6G!jS**qqW}i} Cþڊ!ȅ?oiq2ˋey>}>ӯw kl%t}1,&Lq*7_.foj-Sr޴9!_bηώNӸk`A^& ZypWȢs'tq t`丹\ V 1˕`]ǎ;wHsS`(e.\;Ҋ`ke{Yyci"t&"57:֒?qv@]<=.k if= oqV~wa> B2&[֠R|b{!䄠LV=VqYY ?s`2٭>^P'J4J(q-9(}Mt-ܚ3?~;Π%/8C(ѬL0^U19OW\omd\{nP*pI-:%!e *Y\|3rs،|Wlz'TTl;~)kqoP׋ NC劖Z~RWQF%ќEK&mBh'u%K%K-/rZеi tq{m{hqpQQ KZ &R.`QМSC :iDZ\p讌%L."ّ;Fi4m1*]cUn.he,K] &es3bX&]BqQrA_&~o0z6[ 2Ԁ?6_WY=5F;9qeu2fu-]7CLi:K%-IR3Ckr&92gq=P!\O. 
4M5B42іHI(cFP{ZR҂2~9oٟoG",u}Qթf:Ȝn݋4͟9}N~6[aj5q[ʔBazۨX+ K,7?S3;t6鄥CUWa!n?2\фh=R3QKrz 0R }P+nUҼUxi%Fb$ẅw\þW4'klNTn*]^_}n/IlUWw|>MYg[)M`m;[ eYuE0Bـ\F=T X-$nw* `p~k~UX< H$gc3г~}/$|b{ec+3BC}$ZK՛;KX85wTzbXBXL_ZW`_F#ZH&jG4z#>FX`*,ڰb6J!~EKpM==NQ'o&VZmtARcP[-m*&QV{L Yb1嘕:›J]2L2(P-SvLTüeqUWQeqUWQAmS9lp\YoS"0X#,nśIhtݷO|DB*? >|\`:Ż4ɗDOru:LE/?tv+7瓋OcFWι6L?o..jV -_4 xCMqpS Ij8Yq|IJI_~=J0Υ7Ov'm#B  pAxJpxeCƂ%2.dPBT%F%si"{2{w)kv^?ZdAT.~klsn K>Qd I ,c4%%MTK%H)Zh4XZǹ EI#ؒ/(,`ѨTj;ژ$sd@٤}43c/@TtVJVW=άfH8 E+[TE+[sesk]=5T.֖c5UҗpC^p.q Xn4x?ښ~FB&  S ͐PX=}C\`5n^W`XJݸHTU* @Sl&zydx2KCJNB^@OIw ׉b֮ xyTVQhb^KDW&TNeH#m0De$evnՐʶ0 Fl^Tp;XwzYu+8{#EoihYofs%a~lJ%&AԐАt ,"xe!䵴P% dK#Xzm[M#e=Ujd6,;n@H {goujHwuVgUYUguVYtX)AfL$UJ0BrsoʦmVtmЦGxgg7h%z6Fs e?_jY_?O?fŇ(})$@`]R4):Y9q<EeYR )YTc,KEx"V5F֦'+}Y߱e }T1XZb̒G*}\x%I$P#u2 5CU|q:v0?t\܌˛ "NhF ;JR˷ɛьkwΒ&քi?d 2K.AK\>p]$,Ȝ,eTR~zf3罻#( hf}tNQf+t$` NIȣm3Vq辈aPfu-#5 984^k:퐷UXo;l퀙/'dGˢR)w+IAuRc*m-Y ^0[U|"O`N#Vb qHtt:nfv^N>n6yQi Cū]Tj6z# V5?N wݝiqpS$)=c3Ǟ 's]koG+>콻Ј~_ٻY vE%dP% +sqf\wꁣoʥzA^NH(]ȱ^`(s{-E4Yiӊ#p|)VLzWѰ:QWIˎ.+ w.HٓPt]odEDn!Bpſ )}ObBoCdyiݍ/x蠘l\lpܥ*Kź0ɦms0% +Mðbg#0N{05Mg rv1q `=&[^D{f#ґg?CP}9ób>Њt8=w&Q ?3PŖNjM K\&xJd\0J>ĂoJ1pm:c2k<J Kf͐`$T8Z5 \v5NJ݆/hoO+)+>rq l[Qch9^98+^lpƻC,9:22+F#]{'Š1I Mv) c] y^cP eWJ” _m(z<kUl}iH +yfI*A6MKa%bCЅgcK35+yv$ j~3 ldbD;\ Py?/ 8oe3Rlx XvA)AN!tPJ q!\R1e!ryЊGAH*G$ !/TXiN1Fx ( QVQ$' ;DϜ2F}VY1"bFzޫ25SZ|8=Lm]p{#W=l* ,# 黾bA$kי\ 0Čn*.Et0 = (Hr }ՁHv4H#KPbP/t#Vjuų+)^אeFD>w4VHxXCHō\T7I3W<\P(5[ .=f.L zH"YBxN4z*iߡn|ü7y2z7^-0^\Lo{~(]qt=0{LeGURug0Ǯ7RfuF2.p਑ԧVu׽^N-6xs(bKUh8c6Uh"<foإT [+{T[6myBT[-kBVHo0Ȳ>7l3? :].yV `vTHH`{}0Tuqu] [xՋvEbc_-%*jbQhtnYp9Ϊ,8]$& Ic.Y5N Ob:FގU.m]B%Gı@<ǭ D.߷3[IjI*)+]p'R~twY0XekV*+]Ŀ@)8a߭n@6IrN/Nݬ{UyKIX_0@!ȧs*]v r _++?,c9zW n[/$5lT?QTyta Pi@\Lh}:ƺwd^J= }uGKMTvQBk`|S0z1'7s -"SSc|+W^AX,7 䎇RAX?L^l,L1spޑ^CkbB)nb)"Ze,nۋȑ`%R^ۋ@P#5 AyKbP?@ukÙ:']>Ď^G6vt'v3ތ;f&.٘T {0*zr׈/.n }uŻ/nzu pDs ~AW7n1*JQQICSg.[D->r ?&~0s){q_U35N'o[v{ .Չ-DJ0 c=j v\8hz\ q4hOƣKק;dhB@a24 7r)qip MS[40uk;&'"P\S `AB8&`V8)B%ubiOZ RA6V5ji:1S(bRoDg,(+ G` RYw=؄T+c;G#u$9&I;Ow3[4r ^}_\_D'x s<y*! YKJPLxkCĺodjpӬZ2,vs{Y?&j ۔`$䨺pV MȽ >l lQ298Nm* (_J \4)6]!$,AHEYY.0%;XQ#cgtSԴTd-iۆntcs ]u{5&ݹp[31XK3 }E }oavv2c18S[tYzw`]#dnB#); >Ҧu%f\If;?h~3#8-/gfJuy,x @% y*IeG''lG6G;Vlv”Cd3:+G Xѓ'@E9³"i_f[`&+Z0(iΤ\ˉI$'G@ZRYBM5E!6 -$@k˶lDŽFyF\hA8E{_XMIUZx_10}EcJr:~(&y3P:HǼ&R0R/gtc8:πopkX 2ǞsUDwgj4R=OseŸ<%Vy-NWLYilJ`=,H"TmݜA0#rBZ/d] lx BBZѱǘ]\X,zQI3?iX[X=AB(ŰWCxZC;jY[%DɆжmJݓ:榰Sr5: e@И*}sYnJwtc(t0AC\*HrB StMWf/'4*q5[2ЄM/b wt @zfh^igD+ijb7vgϳj %6c`{B!֙ >ٱ'f6t6_Ýrwǟl&ΜwJOJPI+3?{[vB0G?EA@tL@j?KMLlRh AynhPᎣ5M⼽hv/7 wM?], ;$;)!^t7L4n"{#m2Zξn9y^H|.[Tϔug6(&vgc9Ej-YU2 36a9۸sH <eI ] Ү'ֲߌ|ڛDӻgӀŅ=mJKkۿ?Q4W͡ɾwo4A#x%twJJPx詿kͿ/."ERw8_'S!%B# ]qXu3JFv>p~Viac)Eh&uϟwcw2Mnfu_\|%; P;\}HX .8ӑ+S܀pϣxg)L$['2^eKBCMMz  D{[L'+U/k^\Kw._F_ןQ".h7ҫat}y;fOK|=~П??I͛{?{y{,Nt=.glv?_sO曷 ^_o=w*=_?N}p06KTޛ>~|ݼ'5$\{55nvP;Lվco'?-hΒt6>~N+knXS2\]}~H>9!:ԡD9!N|{]ywA?{?|."<ֿo$PX/{F'kt&{~=˴3W1{{*|sӛv@z[W lJ!Z.LKid01oEFd̓oĻq>8BW%%bhՉ5|XpFhĖ 1ks~\՟dcMNq %V!,ס$Wo;-.Սnlp]a;8BL5b1L $:qsۣVm%MmcDX;a:~=yGjnf[~? S)7f~ `0>QpG R\o;  pyٿ op䧞ϖV;g4'1”QkkZXmkbpڪXmT[Wz1S5Jp#ب[WsL%mun[x7Oo?íж a uJ[5tq9^q4`[Glɲ (4Vh뷾N4V2#F1E+%ue48πL4II|󟾽脏nQ^w=E9ٛϣWŬ'gϢpq欌kp#-VH2eX% (#4Ay;4v&56]6N]y;!ub.1Z!p?uC9[jvU$MWo! 
ޘ@Me]߬ #Ѩ mE|`Q2[",!LN GHW$bE L:%J \qrTZGX{{oF& e.腓y]w[--̺W%WJd3$oirvkSCLUrYj4AP"o7$⇥ NfGWyShauZjOڮ qS_X< 6Fxrw6ΈRM/G_^Idӷ9Hɏ\Y Icnr87ͩ@Mb-ZB˒bs()USw\}u7aCMZH*ejQ(F/r]BGҏٷ1RY+Yf40-@I 6MR!eIJ%>Ŗ7b%$?\w i,U@Ě> Z| s f L&A4Rͫߟ?:JЌ'o1pz*VUSqc)hC1g7?,UsyI>^cZ-Om`mpnrHT0ibDJLRWsZRJyeL[^8>A%AEƨC`m{] 1(.uz/ٗc cMy'pGuwKp ?\A*^tt]r S]eNwE˭O>Zvw5|<\ѕ[u Éᾲ,k񉇠d/|rj}sfŃ1Suab9tyhgne!˟BV~ӫ?deЋB|~D" LM)e~q:M"/)ZE~~%(i|8`^Q!MiI3*iXHmd2b$ oL5YdZZ㢹tvl[A$rf&PX5ܦ15k}o7}HY2A+9>]vr/oXdpqŬ`K xSţ+08oi2HMAI IV {;pQj($!myxFzs1"M1m!Ev7+g3L7g0"Of]~cfѵH*ɖo¸^Sz#FY"̫=sC}ٺ/=n"U.ڠحMr[ AJU{|Oͥ0[Ũc7bV]|N>5ՑއWeO3֪Niip4_.*m}}>n ,5U_Z+32ŮH95\zOY;A+3́0-#F{ra~Em"㚮jsjmi,DǗ&JZѮnD+C՗^Fl ɉ %gQ4atKIfs7;U}!gܥ)e)ge{fPJ'BY%S-UpJB3Db&Y ;V<-kT#bB4( jaJmB>_RZ QGgU.#ۏ=W("֨ MUQYH|VaB4KpV!ta25HcSV x*1rIĞ6+xƛylvhit{Ƨ0=K>QDx \0{\2Y[ >R/e!kh[]j(JЪaGO敖ubRڸA:T_vJif3TђKX5TIkͫ|U'T3iٌ:%Vl^GA.<#Gì(IXC 8-P fѮFEuhݘtZzZbM!~Zg*?_r6MS//[BK-!Yu7%(`] |)ېbLQRa )U|w`G3̕%|\? VyZ_PJjN*qg>.5+NxbH |3 bLQ33B.lf4y0}kdN|gК0uɕq],Kk/`+ݎ0ϷI wƗNLa 0;QrK1,S^w[{b8>>~9i}ĝp`;^T.',ǘ$u@i} WWjcPeRB&wV)s6[].LKn%ۏ4MS*oȔz%0i|#D&KL͑ARc1d ,|)!1 ^}g0QZVE~9}|^\ؠxs? Tt˪N 9q%{ˈ,:7wL0\w"xɬCՆlZ.ôI!VSP:`'j=Q0?qԘ > dB N`1 |g+X7 @cnU1(u>c֢NYѩĤ5Ve[=wJ}[K#Q`ɑ)6H : P2H \DýWn1"7}nCR]M+CDkC?h|Gx>tva'2 X{#&|X1b~czv!m1 TIXPB#vB&L+;I \ H44e2f@0b VRRy0w~ "BpBJ53҆2vX15&$ 9L(NTiŒ-ԂW}<!t2g'j0|XR^Dix7fכu0{00\jE* " Yjm]VR.vJk bR1vvRT[n&KuAɄN7.ܩDR͸0ܬ:O_b ssLuĕ*HsXpD`}D1p=|Ba$~i9Q߸lgwJ\]},%5kiD (QN :0 pP%% {+6hЌa'сڐ:ERFH0V֦\ď2rw8}Z'M ( DqaP_9&?0ʴAOBK0ZH, *#{oh'5C8 VjL>Q]rV) lj',e)Ʃ3XR&K9y7:6fA>la'C#Ypd+!'CN3`r`d\&6L!QmỘq.f;qoR@haClarB 385&W!`[ 5)GlO{˲;D;Utљ2ĖV?fkNnX^'?Cn:%JT[, 6 8ˬ۞lrw]v8 2ݭbHmZ^^?¯NBxsR'AjsRh~}~`Oό3R=Η5+'Exz3~ mGfYS?$-<ݗRpc\P,Di WyNkw 0"onߓCq׷>e;0#J,1>:1fqyzq:ye)紲ve=-iFVR).oכ ^Xz$܌X](yhYN*IkEY=YJɔF:8 /3_ϥ>u,(Yg-ֳ}YvUUUUWòvx HeP ]#JSW`֤$cY$(y~ IEfҫh(d 4]{.B.E= |s:+M{,ZS֔43U$qAd;W?0qȇoטo3 mOS㟦Z##ncIm#e2j\Sl0UyA6JlPΊ f 9Bb"p5QMw16Eb]C?`HTXB#+PFJWd4ٳ?;ʤ'AQ ʄ`#oJ|1MZp;`rhAmZ=WrOMhsƔ[н`@++~Zs2wb' ̬ka~T4o+!IŌdK6S!Jbcd`xυ[w aB>1$͑$ dOTxEhK,rRzȶx>30sVle8WEμMY8aZfaq,KW5HdR}9G3l`Gu|1+WNGm+PLv%+`~T2YPxYCNĘ7<$pmu dTX3B8Z1#!t3.Wr%+06ij4hLӎf)8?}@l_pkN+!?xc>ǜ*/S1:n:1^YTw ީ؅#~mÀPiIyilN1h²Db(h]VuAi4{Qfsnt;0fun& #i>vA 8pC7w3`XWRBHiu rlu v3oV)v3akvÎ7wuBSVoqƁ{ѸqfF[KaƉ QpȻ}\F;W>WC:;Ŋ*سB0xwZT8 xٺWZrLz+ʵ.WL.7w\ *]qYzkuyGG;0 }zãqXjJ 9k =$gh!@}!H,fϑ-i?󝞮RG5 ULi1̕#B]2Jj eTf{]24\VEYNNtX/bп( f 7p 3XrԻ`coעz-Q9@؋u:53 >M4SC39]8+gu4 #ktE+` GCtK7 c pwZu8D[ G>qMe`e}G͂з\IT̄I8;|q>tUo}huoF5˷'ЋOxoJNVxrQ jͲ?`|ssKt[Y߇bMd̎~8xzr?EUC٘O.ϙfIbď󍋞Yr(HKYoTek1TN7 `LQےY ^p2[Z]1)R6*D5\*}ڥH0HBFY&䏎a&{BE|_I)ٛkUTS)x[Tq<ÕCkQh[sݶCimvc^]LsHRyql` Oy%'v!m͋~(6ZQjJV)Y$ckkDYIxþMlQMCz FU"vЙ=(Twԫ5x^,~6b= i뮪mt2k})1]?,ww4_43%s;Pgh& }jW]7uV熚%BlYd+;kҌ58&XoewMZ}Ҁ lDTH70boǢ rU\Os3Y;>s "%V1?u hd}|Ӟ1ZtVuQiCu`ꂕg 9$l妶ͽ~1s 6~}2^+`+w_^ޖB!,^ռ O|rslO5fSMό]m2QY%`3s'w’LjK:x=>-vkF'k{*g !2,2͙7A,ӿ(h92Te6[}9yѭ_?P$+ |6ḩ%]\ĢϿ~Ճ٧0Vc勎O͆gcHk~;-͜0S'l.z 23Fg$u -|ݽDRyID+'HYLowG(El %hq 6 QT֡Y[N2KK*})'gX"U-z bÓ:ܭx }Nr7<^`qraE+ DF-/$ Y%A 2{xn<*51&g??wq97otIPߒJ%aE{*n$v7B[7$ ĎvȰ4S$&ogٯM% ;|=z{zm|rHd!] 
[kubelet.log.gz: binary gzip-compressed payload omitted (not recoverable as text)]
var/home/core/zuul-output/logs/kubelet.log0000644000000000000000005416515615137125633017710 0ustar rootroot
Jan 30 11:56:02 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 30 11:56:02 crc restorecon[4673]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to
system_u:object_r:container_file_t:s0:c440,c975 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:02 crc restorecon[4673]: 
/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 30 11:56:02 crc 
restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 30 11:56:02 crc restorecon[4673]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c968,c969 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Jan 30 11:56:02 crc restorecon[4673]: 
/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 
30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 30 11:56:02 
crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 30 11:56:02 crc restorecon[4673]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 
crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 30 11:56:02 crc restorecon[4673]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 30 11:56:02 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c377,c642 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 
11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 30 11:56:03 crc 
restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:03 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 30 11:56:04 crc restorecon[4673]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 30 11:56:04 crc restorecon[4673]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Jan 30 11:56:04 crc kubenswrapper[4703]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 30 11:56:04 crc kubenswrapper[4703]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Jan 30 11:56:04 crc kubenswrapper[4703]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 30 11:56:04 crc kubenswrapper[4703]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 30 11:56:04 crc kubenswrapper[4703]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Jan 30 11:56:04 crc kubenswrapper[4703]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.766975 4703 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
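Every deprecated flag above has a config-file counterpart: the kubelet here is started with --config=/etc/kubernetes/kubelet.conf (see the FLAG dump further down), and the logged guidance is to move these settings into that KubeletConfiguration file. A minimal sketch of an equivalent stanza, assuming the upstream kubelet.config.k8s.io/v1beta1 schema; the endpoint value mirrors the --container-runtime-endpoint flag logged below, while the plugin path, taint, and reservation values are purely illustrative, not this node's actual config:

    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    # replaces --container-runtime-endpoint
    containerRuntimeEndpoint: unix:///var/run/crio/crio.sock
    # replaces --volume-plugin-dir (illustrative path)
    volumePluginDir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
    # replaces --register-with-taints (illustrative taint)
    registerWithTaints:
    - key: node-role.kubernetes.io/master
      effect: NoSchedule
    # replaces --system-reserved (illustrative reservations)
    systemReserved:
      cpu: 500m
      memory: 1Gi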
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775191 4703 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775227 4703 feature_gate.go:330] unrecognized feature gate: Example
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775234 4703 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775240 4703 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775246 4703 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775252 4703 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775258 4703 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775263 4703 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775269 4703 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775275 4703 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775281 4703 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775287 4703 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775293 4703 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775298 4703 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775303 4703 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775309 4703 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775315 4703 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775323 4703 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775329 4703 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775334 4703 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775339 4703 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775344 4703 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775349 4703 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775354 4703 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775359 4703 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775364 4703 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775368 4703 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775443 4703 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775661 4703 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775668 4703 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775673 4703 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775680 4703 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775686 4703 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775692 4703 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775698 4703 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775703 4703 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775709 4703 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775714 4703 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775720 4703 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775725 4703 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775730 4703 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775739 4703 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775744 4703 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775749 4703 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775755 4703 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775760 4703 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775765 4703 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775774 4703 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775780 4703 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775785 4703 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775790 4703 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775795 4703 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775804 4703 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775809 4703 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775816 4703 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775824 4703 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775830 4703 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775835 4703 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775841 4703 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775846 4703 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775851 4703 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775857 4703 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775863 4703 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775876 4703 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775885 4703 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775891 4703 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775899 4703 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775906 4703 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775912 4703 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775918 4703 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.775924 4703 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
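The long run of "unrecognized feature gate" warnings above is expected on an OpenShift node: the rendered kubelet configuration appears to carry the cluster-wide feature-gate list, and the kubelet only knows the Kubernetes gates, so it warns on every OpenShift-specific name while still honoring the ones it recognizes (CloudDualStackNodeIPs, KMSv1, DisableKubeletCloudCredentialProviders, ValidatingAdmissionPolicy). In the config file such overrides live under featureGates; a minimal sketch, with gate names taken from the log and the false value invented purely for illustration:

    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    featureGates:
      # recognized by this kubelet; logged above as GA/deprecated overrides
      CloudDualStackNodeIPs: true
      KMSv1: true
      DisableKubeletCloudCredentialProviders: true
      ValidatingAdmissionPolicy: true
      # OpenShift-level gate; this kubelet logs it as unrecognized
      GatewayAPI: false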
--container-log-max-size="10Mi" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776319 4703 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776333 4703 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776343 4703 flags.go:64] FLAG: --containerd-namespace="k8s.io" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776352 4703 flags.go:64] FLAG: --contention-profiling="false" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776361 4703 flags.go:64] FLAG: --cpu-cfs-quota="true" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776368 4703 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776376 4703 flags.go:64] FLAG: --cpu-manager-policy="none" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776383 4703 flags.go:64] FLAG: --cpu-manager-policy-options="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776393 4703 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776401 4703 flags.go:64] FLAG: --enable-controller-attach-detach="true" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776414 4703 flags.go:64] FLAG: --enable-debugging-handlers="true" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776422 4703 flags.go:64] FLAG: --enable-load-reader="false" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776430 4703 flags.go:64] FLAG: --enable-server="true" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776437 4703 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776447 4703 flags.go:64] FLAG: --event-burst="100" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776456 4703 flags.go:64] FLAG: --event-qps="50" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776464 4703 flags.go:64] FLAG: --event-storage-age-limit="default=0" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776471 4703 flags.go:64] FLAG: --event-storage-event-limit="default=0" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776483 4703 flags.go:64] FLAG: --eviction-hard="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776493 4703 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776500 4703 flags.go:64] FLAG: --eviction-minimum-reclaim="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776507 4703 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776515 4703 flags.go:64] FLAG: --eviction-soft="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776522 4703 flags.go:64] FLAG: --eviction-soft-grace-period="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776529 4703 flags.go:64] FLAG: --exit-on-lock-contention="false" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776536 4703 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776543 4703 flags.go:64] FLAG: --experimental-mounter-path="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776554 4703 flags.go:64] FLAG: --fail-cgroupv1="false" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776561 4703 flags.go:64] FLAG: --fail-swap-on="true" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 
11:56:04.776568 4703 flags.go:64] FLAG: --feature-gates="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776577 4703 flags.go:64] FLAG: --file-check-frequency="20s" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776585 4703 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776592 4703 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776600 4703 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776608 4703 flags.go:64] FLAG: --healthz-port="10248" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776620 4703 flags.go:64] FLAG: --help="false" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776628 4703 flags.go:64] FLAG: --hostname-override="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776636 4703 flags.go:64] FLAG: --housekeeping-interval="10s" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776644 4703 flags.go:64] FLAG: --http-check-frequency="20s" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776652 4703 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776660 4703 flags.go:64] FLAG: --image-credential-provider-config="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776667 4703 flags.go:64] FLAG: --image-gc-high-threshold="85" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776674 4703 flags.go:64] FLAG: --image-gc-low-threshold="80" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776682 4703 flags.go:64] FLAG: --image-service-endpoint="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776693 4703 flags.go:64] FLAG: --kernel-memcg-notification="false" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776700 4703 flags.go:64] FLAG: --kube-api-burst="100" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776708 4703 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776717 4703 flags.go:64] FLAG: --kube-api-qps="50" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776725 4703 flags.go:64] FLAG: --kube-reserved="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776732 4703 flags.go:64] FLAG: --kube-reserved-cgroup="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776739 4703 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776747 4703 flags.go:64] FLAG: --kubelet-cgroups="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776754 4703 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776767 4703 flags.go:64] FLAG: --lock-file="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776773 4703 flags.go:64] FLAG: --log-cadvisor-usage="false" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776780 4703 flags.go:64] FLAG: --log-flush-frequency="5s" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776788 4703 flags.go:64] FLAG: --log-json-info-buffer-size="0" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776800 4703 flags.go:64] FLAG: --log-json-split-stream="false" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776807 4703 flags.go:64] FLAG: --log-text-info-buffer-size="0" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776814 4703 flags.go:64] FLAG: 
--log-text-split-stream="false" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776821 4703 flags.go:64] FLAG: --logging-format="text" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776833 4703 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776842 4703 flags.go:64] FLAG: --make-iptables-util-chains="true" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776849 4703 flags.go:64] FLAG: --manifest-url="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776856 4703 flags.go:64] FLAG: --manifest-url-header="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776866 4703 flags.go:64] FLAG: --max-housekeeping-interval="15s" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776873 4703 flags.go:64] FLAG: --max-open-files="1000000" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776883 4703 flags.go:64] FLAG: --max-pods="110" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776890 4703 flags.go:64] FLAG: --maximum-dead-containers="-1" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776903 4703 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776910 4703 flags.go:64] FLAG: --memory-manager-policy="None" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776917 4703 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776925 4703 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776932 4703 flags.go:64] FLAG: --node-ip="192.168.126.11" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776939 4703 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776960 4703 flags.go:64] FLAG: --node-status-max-images="50" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776973 4703 flags.go:64] FLAG: --node-status-update-frequency="10s" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776981 4703 flags.go:64] FLAG: --oom-score-adj="-999" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.776988 4703 flags.go:64] FLAG: --pod-cidr="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777026 4703 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777038 4703 flags.go:64] FLAG: --pod-manifest-path="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777045 4703 flags.go:64] FLAG: --pod-max-pids="-1" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777053 4703 flags.go:64] FLAG: --pods-per-core="0" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777066 4703 flags.go:64] FLAG: --port="10250" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777116 4703 flags.go:64] FLAG: --protect-kernel-defaults="false" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777145 4703 flags.go:64] FLAG: --provider-id="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777153 4703 flags.go:64] FLAG: --qos-reserved="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777161 4703 flags.go:64] FLAG: --read-only-port="10255" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777179 4703 flags.go:64] FLAG: 
--register-node="true" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777186 4703 flags.go:64] FLAG: --register-schedulable="true" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777194 4703 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777208 4703 flags.go:64] FLAG: --registry-burst="10" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777223 4703 flags.go:64] FLAG: --registry-qps="5" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777230 4703 flags.go:64] FLAG: --reserved-cpus="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777237 4703 flags.go:64] FLAG: --reserved-memory="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777247 4703 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777255 4703 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777261 4703 flags.go:64] FLAG: --rotate-certificates="false" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777268 4703 flags.go:64] FLAG: --rotate-server-certificates="false" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777274 4703 flags.go:64] FLAG: --runonce="false" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777284 4703 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777290 4703 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777297 4703 flags.go:64] FLAG: --seccomp-default="false" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777305 4703 flags.go:64] FLAG: --serialize-image-pulls="true" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777312 4703 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777319 4703 flags.go:64] FLAG: --storage-driver-db="cadvisor" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777325 4703 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777331 4703 flags.go:64] FLAG: --storage-driver-password="root" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777337 4703 flags.go:64] FLAG: --storage-driver-secure="false" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777344 4703 flags.go:64] FLAG: --storage-driver-table="stats" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777351 4703 flags.go:64] FLAG: --storage-driver-user="root" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777360 4703 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777380 4703 flags.go:64] FLAG: --sync-frequency="1m0s" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777388 4703 flags.go:64] FLAG: --system-cgroups="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777396 4703 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777409 4703 flags.go:64] FLAG: --system-reserved-cgroup="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777415 4703 flags.go:64] FLAG: --tls-cert-file="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777422 4703 flags.go:64] FLAG: --tls-cipher-suites="[]" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777438 4703 flags.go:64] 
FLAG: --tls-min-version="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777446 4703 flags.go:64] FLAG: --tls-private-key-file="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777453 4703 flags.go:64] FLAG: --topology-manager-policy="none" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777461 4703 flags.go:64] FLAG: --topology-manager-policy-options="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777468 4703 flags.go:64] FLAG: --topology-manager-scope="container" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777475 4703 flags.go:64] FLAG: --v="2" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777485 4703 flags.go:64] FLAG: --version="false" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777494 4703 flags.go:64] FLAG: --vmodule="" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777508 4703 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.777517 4703 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.777993 4703 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778011 4703 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778019 4703 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778026 4703 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778032 4703 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778038 4703 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778045 4703 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778052 4703 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778059 4703 feature_gate.go:330] unrecognized feature gate: GatewayAPI Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778065 4703 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778073 4703 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
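
The flags.go:64 block above is the kubelet logging the effective value of every command-line flag at startup, defaults included; most stay at their defaults here because the real configuration arrives via --config=/etc/kubernetes/kubelet.conf (note --cgroup-driver still reads "cgroupfs" while the CRI runtime later reports systemd). A sketch of how such a dump can be produced with the github.com/spf13/pflag package the kubelet uses for flag handling; the flag set and values below are illustrative, not the kubelet's real registration:

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("kubelet-sketch", pflag.ContinueOnError)
        fs.String("cgroup-driver", "cgroupfs", "driver the kubelet uses to manipulate cgroups")
        fs.Int32("max-pods", 110, "maximum number of pods")
        _ = fs.Parse([]string{"--max-pods=110"})

        // VisitAll walks every defined flag, set or not, which is why the
        // log shows defaults alongside explicitly passed values.
        fs.VisitAll(func(f *pflag.Flag) {
            fmt.Printf("FLAG: --%s=%q\n", f.Name, f.Value.String())
        })
    }
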
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778081 4703 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778089 4703 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778097 4703 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778105 4703 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778111 4703 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778150 4703 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778159 4703 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778165 4703 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778172 4703 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778179 4703 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778186 4703 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778193 4703 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778198 4703 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778203 4703 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778208 4703 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778213 4703 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778218 4703 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778223 4703 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778228 4703 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778233 4703 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778237 4703 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778242 4703 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778248 4703 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778253 4703 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778258 4703 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778274 4703 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778285 4703 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778290 4703 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778295 4703 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778300 4703 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778305 4703 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778310 4703 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778315 4703 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778320 4703 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778325 4703 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778330 4703 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778335 4703 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778340 4703 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778344 4703 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778349 4703 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778355 4703 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778360 4703 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778364 4703 feature_gate.go:330] unrecognized feature gate: Example
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778370 4703 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778374 4703 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778379 4703 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778384 4703 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778391 4703 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
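
Mixed into the unrecognized-gate warnings are two other message kinds: feature_gate.go:351 fires when a deprecated gate (KMSv1) is set explicitly, and feature_gate.go:353 when a gate that has already gone GA (ValidatingAdmissionPolicy, DisableKubeletCloudCredentialProviders, CloudDualStackNodeIPs) is set; both settings still take effect but will stop being accepted once the gates are removed. A toy Go model of that lifecycle check, with a hypothetical two-entry table standing in for the real versioned one:

    package main

    import "fmt"

    type stage int

    const (
        alpha stage = iota
        beta
        ga
        deprecated
    )

    // lifecycle is a toy maturity table; the real one lives in
    // k8s.io/component-base/featuregate and changes per release.
    var lifecycle = map[string]stage{
        "KMSv1":                     deprecated,
        "ValidatingAdmissionPolicy": ga,
    }

    // warnIfLocked reproduces the :351/:353 messages for gates whose
    // explicit setting is about to become meaningless.
    func warnIfLocked(name string, val bool) {
        switch lifecycle[name] {
        case ga:
            fmt.Printf("W Setting GA feature gate %s=%v. It will be removed in a future release.\n", name, val)
        case deprecated:
            fmt.Printf("W Setting deprecated feature gate %s=%v. It will be removed in a future release.\n", name, val)
        }
    }

    func main() {
        warnIfLocked("ValidatingAdmissionPolicy", true)
        warnIfLocked("KMSv1", true)
    }
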
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778397 4703 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778406 4703 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778412 4703 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778417 4703 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778423 4703 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778429 4703 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778436 4703 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778441 4703 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778448 4703 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778454 4703 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778459 4703 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.778465 4703 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.778473 4703 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.789275 4703 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.789308 4703 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789425 4703 feature_gate.go:330] unrecognized feature gate: Example
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789438 4703 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789448 4703 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789458 4703 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789467 4703 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789478 4703 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789489 4703 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789498 4703 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789507 4703 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789516 4703 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789524 4703 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789537 4703 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789549 4703 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789558 4703 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789568 4703 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789578 4703 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789588 4703 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789597 4703 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789605 4703 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789615 4703 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789624 4703 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789634 4703 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789645 4703 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789654 4703 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789663 4703 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789672 4703 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789681 4703 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789690 4703 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789699 4703 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789708 4703 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789716 4703 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789724 4703 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789733 4703 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789742 4703 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789753 4703 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789764 4703 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789776 4703 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789787 4703 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789797 4703 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789809 4703 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789820 4703 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789831 4703 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789840 4703 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789850 4703 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789860 4703 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789869 4703 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789878 4703 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789887 4703 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789897 4703 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789906 4703 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789914 4703 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789922 4703 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789931 4703 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789940 4703 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789949 4703 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789985 4703 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.789997 4703 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790008 4703 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790019 4703 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790033 4703 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790044 4703 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790053 4703 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790064 4703 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790073 4703 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790081 4703 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790090 4703 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790099 4703 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790107 4703 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790142 4703 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790151 4703 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790160 4703 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.790174 4703 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790407 4703 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790421 4703 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790431 4703 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790441 4703 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790450 4703 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790458 4703 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790467 4703 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790479 4703 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
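
The feature_gate.go:386 lines above print the fully resolved gate map after all overrides are applied, which is the authoritative record of what this kubelet is actually running with. A small hypothetical helper for pulling the name/value pairs out of such a line when auditing a log:

    package main

    import (
        "fmt"
        "regexp"
    )

    // gateRE matches Name:bool pairs inside a "feature gates: {map[...]}"
    // log line; gate names are plain identifiers, values true/false.
    var gateRE = regexp.MustCompile(`(\w+):(true|false)`)

    func main() {
        line := `feature gates: {map[KMSv1:true NodeSwap:false ValidatingAdmissionPolicy:true]}`
        for _, m := range gateRE.FindAllStringSubmatch(line, -1) {
            fmt.Printf("%-28s %s\n", m[1], m[2])
        }
    }
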
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790490 4703 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790500 4703 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790510 4703 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790520 4703 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790530 4703 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790539 4703 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790547 4703 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790556 4703 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790565 4703 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790574 4703 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790585 4703 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790597 4703 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790608 4703 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790619 4703 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790629 4703 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790639 4703 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790648 4703 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790657 4703 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790666 4703 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790676 4703 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790685 4703 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790694 4703 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790705 4703 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
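
The same warning set appears several times in this log because the gate spec is applied to more than one FeatureGate instance during startup, and each pass re-logs the full list; the repetition is benign. A hypothetical helper that counts the repeats per gate from a log fed on stdin:

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "strings"
    )

    // Reads a kubelet log on stdin and counts how many times each gate was
    // reported unrecognized, confirming the repeats are just the same spec
    // parsed multiple times. (Illustrative helper, not part of the kubelet.)
    func main() {
        const marker = "unrecognized feature gate: "
        counts := map[string]int{}
        sc := bufio.NewScanner(os.Stdin)
        sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // log lines can be long
        for sc.Scan() {
            if i := strings.Index(sc.Text(), marker); i >= 0 {
                counts[strings.TrimSpace(sc.Text()[i+len(marker):])]++
            }
        }
        for gate, n := range counts {
            fmt.Printf("%3d %s\n", n, gate)
        }
    }
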
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790714 4703 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790725 4703 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790734 4703 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790743 4703 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790751 4703 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790760 4703 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790769 4703 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790778 4703 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790787 4703 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790795 4703 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790804 4703 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790815 4703 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790825 4703 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790834 4703 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790843 4703 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790852 4703 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790860 4703 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790869 4703 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790879 4703 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790887 4703 feature_gate.go:330] unrecognized feature gate: Example
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790896 4703 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790905 4703 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790914 4703 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790923 4703 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790931 4703 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790942 4703 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790951 4703 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790959 4703 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790968 4703 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790977 4703 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790985 4703 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.790994 4703 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.791002 4703 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.791011 4703 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.791019 4703 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.791028 4703 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.791037 4703 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.791046 4703 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.791055 4703 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.791063 4703 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.791076 4703 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.795790 4703 server.go:940] "Client rotation is on, will bootstrap in background"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.804435 4703 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.804588 4703 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
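
The bootstrap.go:85 and certificate_store.go:130 lines show the client-certificate path: the existing kubeconfig is still valid, so no bootstrap is needed, and the current client cert/key pair is loaded from kubelet-client-current.pem before rotation begins. A stdlib-only Go sketch for inspecting that file's validity window (path taken from the log line above; run on the node with read access to the file):

    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "os"
    )

    func main() {
        data, err := os.ReadFile("/var/lib/kubelet/pki/kubelet-client-current.pem")
        if err != nil {
            panic(err)
        }
        // The file holds both certificate and key; inspect CERTIFICATE blocks.
        for block, rest := pem.Decode(data); block != nil; block, rest = pem.Decode(rest) {
            if block.Type != "CERTIFICATE" {
                continue
            }
            cert, err := x509.ParseCertificate(block.Bytes)
            if err != nil {
                panic(err)
            }
            fmt.Printf("subject=%s notBefore=%s notAfter=%s\n",
                cert.Subject, cert.NotBefore, cert.NotAfter)
        }
    }
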
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.806538 4703 server.go:997] "Starting client certificate rotation" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.806593 4703 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.806870 4703 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-23 05:14:00.368311562 +0000 UTC Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.806995 4703 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.839933 4703 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 30 11:56:04 crc kubenswrapper[4703]: E0130 11:56:04.843375 4703 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.129.56.250:6443: connect: connection refused" logger="UnhandledError" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.847245 4703 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.861478 4703 log.go:25] "Validated CRI v1 runtime API" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.913234 4703 log.go:25] "Validated CRI v1 image API" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.922610 4703 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.927741 4703 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-30-11-50-46-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.927769 4703 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}] Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.952030 4703 manager.go:217] Machine: {Timestamp:2026-01-30 11:56:04.948745286 +0000 UTC m=+0.726566980 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654132736 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:4234120e-d9fe-419a-b814-ae76a780f4ec BootID:0ee38ffa-0913-4f86-a89a-42f0682f685a Filesystems:[{Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 
Capacity:16827068416 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730829824 Type:vfs Inodes:819200 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:7b:f3:2c Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:7b:f3:2c Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:91:43:4a Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:b8:fc:3f Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:c7:0f:00 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:eb:3b:94 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:22:10:4e:96:2f:28 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:be:f9:1c:49:6a:58 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654132736 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] 
Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.952318 4703 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.952502 4703 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.954931 4703 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.955355 4703 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.955403 4703 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.955659 4703 topology_manager.go:138] "Creating topology manager with none policy"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.955673 4703 container_manager_linux.go:303] "Creating device plugin manager"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.956180 4703 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.956203 4703 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.956448 4703 state_mem.go:36] "Initialized new in-memory state store"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.957117 4703 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.961715 4703 kubelet.go:418] "Attempting to sync node with API server"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.961765 4703 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.961786 4703 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.961803 4703 kubelet.go:324] "Adding apiserver pod source"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.961819 4703 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.965576 4703 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.966611 4703 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.968430 4703 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.970366 4703 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.970404 4703 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.970413 4703 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.970423 4703 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.970448 4703 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.970468 4703 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.970481 4703 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.970502 4703 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.970516 4703 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.970528 4703 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.970442 4703 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused
Jan 30 11:56:04 crc kubenswrapper[4703]: W0130 11:56:04.970479 4703 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.970571 4703 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.970585 4703 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Jan 30 11:56:04 crc kubenswrapper[4703]: E0130 11:56:04.970587 4703 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.250:6443: connect: connection refused" logger="UnhandledError"
Jan 30 11:56:04 crc kubenswrapper[4703]: E0130 11:56:04.970592 4703 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.129.56.250:6443: connect: connection refused" logger="UnhandledError"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.974332 4703 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.975089 4703 server.go:1280] "Started kubelet"
Jan 30 11:56:04 crc systemd[1]: Started Kubernetes Kubelet.
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.976516 4703 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.977116 4703 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.976629 4703 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.978236 4703 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.978500 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.978537 4703 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Jan 30 11:56:04 crc kubenswrapper[4703]: E0130 11:56:04.978768 4703 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.979310 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 20:45:29.310744596 +0000 UTC
Jan 30 11:56:04 crc kubenswrapper[4703]: E0130 11:56:04.979370 4703 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.250:6443: connect: connection refused" interval="200ms"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.979405 4703 volume_manager.go:287] "The desired_state_of_world populator starts"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.979680 4703 volume_manager.go:289] "Starting Kubelet Volume Manager"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.979536 4703 factory.go:55] Registering systemd factory
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.979810 4703 factory.go:221] Registration of the systemd container factory successfully
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.979427 4703 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Jan 30 11:56:04 crc kubenswrapper[4703]: I0130 11:56:04.982279 4703 server.go:460] "Adding debug handlers to kubelet server"
Jan 30 11:56:05 crc kubenswrapper[4703]: W0130 11:56:05.041575 4703 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused
Jan 30 11:56:05 crc kubenswrapper[4703]: E0130 11:56:05.041745 4703 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.129.56.250:6443: connect: connection refused" logger="UnhandledError"
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.042033 4703 factory.go:153] Registering CRI-O factory
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.042063 4703 factory.go:221] Registration of the crio container factory successfully
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.042197 4703 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.042225 4703 factory.go:103] Registering Raw factory
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.042243 4703 manager.go:1196] Started watching for new ooms in manager
Jan 30 11:56:05 crc kubenswrapper[4703]: E0130 11:56:05.043376 4703 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.129.56.250:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188f8039f40065c3 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 11:56:04.975052227 +0000 UTC m=+0.752873881,LastTimestamp:2026-01-30 11:56:04.975052227 +0000 UTC m=+0.752873881,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.046344 4703 manager.go:319] Starting recovery of all containers
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050554 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050603 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050617 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050633 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050646 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050658 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Jan 30 11:56:05 crc kubenswrapper[4703]:
I0130 11:56:05.050670 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050681 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050694 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050706 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050718 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050731 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050743 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050759 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050771 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050784 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050797 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050809 4703 
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050827 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050841 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050854 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050867 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050880 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050892 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050904 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050916 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050933 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050947 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050960 4703 
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050972 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.050987 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051000 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051012 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051052 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051065 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051076 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051085 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051096 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051106 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051152 4703 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051164 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051174 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051184 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051193 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051202 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051212 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051221 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051231 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051240 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051249 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051258 4703 reconstruct.go:130] "Volume is marked as uncertain and added into 
the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051271 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051286 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051298 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051311 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051323 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051335 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051367 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051378 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051387 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051396 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051405 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" 
pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051415 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051426 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051435 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051445 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051455 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051464 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051474 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051483 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051492 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051501 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051511 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051519 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051528 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051538 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051547 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051556 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051566 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051575 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051585 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051594 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051604 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051613 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051623 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051632 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051640 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051649 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051658 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051667 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051675 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051684 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051693 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051703 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051711 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" 
volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051722 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051732 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051741 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051750 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051760 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051770 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051779 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051789 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051798 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051813 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051823 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" 
volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051833 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051844 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051854 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051864 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051877 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051893 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051907 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051919 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051929 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051938 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051947 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" 
volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051956 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051966 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051981 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.051996 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052008 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052020 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052030 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052039 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052048 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052060 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052074 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" 
volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052087 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052099 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052111 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052140 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052149 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052161 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052172 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052183 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052193 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052204 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052215 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" 
volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052226 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052235 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.052244 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054187 4703 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054216 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054232 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054242 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054251 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054263 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054278 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054291 4703 reconstruct.go:130] "Volume is 
marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054304 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054318 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054333 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054344 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054357 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054368 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054380 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054392 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054404 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054417 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054429 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054451 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054464 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054477 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054490 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054503 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054515 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054528 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054540 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054552 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054565 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054577 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054591 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054603 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054617 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054631 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054646 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054760 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054783 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054819 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054832 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054847 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054860 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" 
volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054873 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054887 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054905 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054926 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054938 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054952 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054966 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054979 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.054993 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.055008 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.055037 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.055051 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.055066 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.055080 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.055094 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.055108 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.055136 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.055149 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.055162 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.055174 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.055186 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.055199 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.055212 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.055225 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.055239 4703 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.055250 4703 reconstruct.go:97] "Volume reconstruction finished" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.055258 4703 reconciler.go:26] "Reconciler: start to sync state" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.059516 4703 manager.go:324] Recovery completed Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.075052 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.078551 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.078606 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.078619 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:05 crc kubenswrapper[4703]: E0130 11:56:05.079584 4703 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.080343 4703 cpu_manager.go:225] "Starting CPU manager" policy="none" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.080378 4703 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.080404 4703 state_mem.go:36] "Initialized new in-memory state store" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.083670 4703 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.084998 4703 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.085039 4703 status_manager.go:217] "Starting to sync pod status with apiserver" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.085060 4703 kubelet.go:2335] "Starting kubelet main sync loop" Jan 30 11:56:05 crc kubenswrapper[4703]: E0130 11:56:05.085104 4703 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Jan 30 11:56:05 crc kubenswrapper[4703]: W0130 11:56:05.087137 4703 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused Jan 30 11:56:05 crc kubenswrapper[4703]: E0130 11:56:05.087202 4703 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.129.56.250:6443: connect: connection refused" logger="UnhandledError" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.109263 4703 policy_none.go:49] "None policy: Start" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.110511 4703 memory_manager.go:170] "Starting memorymanager" policy="None" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.110550 4703 state_mem.go:35] "Initializing new in-memory state store" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.167432 4703 manager.go:334] "Starting Device Plugin manager" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.167481 4703 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.167493 4703 server.go:79] "Starting device plugin registration server" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.167904 4703 eviction_manager.go:189] "Eviction manager: starting control loop" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.167931 4703 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.168588 4703 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.168756 4703 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.168771 4703 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Jan 30 11:56:05 crc kubenswrapper[4703]: E0130 11:56:05.174866 4703 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 30 11:56:05 crc kubenswrapper[4703]: E0130 11:56:05.180722 4703 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.250:6443: connect: connection refused" interval="400ms" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.185804 4703 kubelet.go:2421] "SyncLoop ADD" source="file" 
pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc"] Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.185932 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.187205 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.187257 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.187268 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.187443 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.187835 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.187902 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.188280 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.188355 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.188366 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.188484 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.188623 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.188672 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.188697 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.188710 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.188719 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.189636 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.189654 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.189662 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.189827 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.189843 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.189851 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.189911 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.190101 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.190152 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.190490 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.190517 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.190529 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.190688 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.190742 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.190782 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.190977 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.191054 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.191115 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.191736 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.191766 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.191777 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.192016 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.192052 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.192147 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.192218 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.192286 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.192773 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.192873 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.192954 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.268072 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.269694 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.269797 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.269820 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.269859 4703 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 30 11:56:05 crc kubenswrapper[4703]: E0130 11:56:05.270517 4703 
kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.250:6443: connect: connection refused" node="crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.358359 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.358439 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.358471 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.358493 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.358514 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.358601 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.358674 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.358717 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.358743 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: 
\"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.358770 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.358793 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.358820 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.358852 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.358877 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.358895 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.460437 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.460525 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.460554 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc 
kubenswrapper[4703]: I0130 11:56:05.460579 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.460604 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.460637 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.460662 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.460684 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.460709 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.460730 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.460754 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.460776 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.460769 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: 
\"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.460850 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.460794 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.460914 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.460948 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.460970 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.460978 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.461038 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.461054 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.461086 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.461186 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: 
\"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.461222 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.461252 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.461293 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.461335 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.461368 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.461403 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.461436 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.471359 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.478827 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.478873 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.478892 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.478928 4703 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 30 11:56:05 crc kubenswrapper[4703]: E0130 11:56:05.479580 4703 
kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.250:6443: connect: connection refused" node="crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.529072 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.539774 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.546472 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.571199 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.578258 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 30 11:56:05 crc kubenswrapper[4703]: W0130 11:56:05.578592 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-0c04d8d7687969f07d819323281a48815ea1c3bd3319a4d6e815b9b41ed4e17d WatchSource:0}: Error finding container 0c04d8d7687969f07d819323281a48815ea1c3bd3319a4d6e815b9b41ed4e17d: Status 404 returned error can't find the container with id 0c04d8d7687969f07d819323281a48815ea1c3bd3319a4d6e815b9b41ed4e17d Jan 30 11:56:05 crc kubenswrapper[4703]: E0130 11:56:05.581838 4703 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.250:6443: connect: connection refused" interval="800ms" Jan 30 11:56:05 crc kubenswrapper[4703]: W0130 11:56:05.589582 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-38d6a12b35f4336c3d39e809c7e77d6bed8804a8df8be7fcd95422b51ed7e5bc WatchSource:0}: Error finding container 38d6a12b35f4336c3d39e809c7e77d6bed8804a8df8be7fcd95422b51ed7e5bc: Status 404 returned error can't find the container with id 38d6a12b35f4336c3d39e809c7e77d6bed8804a8df8be7fcd95422b51ed7e5bc Jan 30 11:56:05 crc kubenswrapper[4703]: W0130 11:56:05.594000 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-d56e6a5f6200d5d89313c903f513a62eefede93a90dad1134e17d5d2142ddd93 WatchSource:0}: Error finding container d56e6a5f6200d5d89313c903f513a62eefede93a90dad1134e17d5d2142ddd93: Status 404 returned error can't find the container with id d56e6a5f6200d5d89313c903f513a62eefede93a90dad1134e17d5d2142ddd93 Jan 30 11:56:05 crc kubenswrapper[4703]: W0130 11:56:05.597775 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-507f45106e9d38bb2b420827a8770de4d1dd681645ed1e7d8d3555de6e5adf90 WatchSource:0}: Error finding container 507f45106e9d38bb2b420827a8770de4d1dd681645ed1e7d8d3555de6e5adf90: Status 404 returned error can't find the 
Jan 30 11:56:05 crc kubenswrapper[4703]: W0130 11:56:05.597775 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-507f45106e9d38bb2b420827a8770de4d1dd681645ed1e7d8d3555de6e5adf90 WatchSource:0}: Error finding container 507f45106e9d38bb2b420827a8770de4d1dd681645ed1e7d8d3555de6e5adf90: Status 404 returned error can't find the container with id 507f45106e9d38bb2b420827a8770de4d1dd681645ed1e7d8d3555de6e5adf90
Jan 30 11:56:05 crc kubenswrapper[4703]: W0130 11:56:05.599740 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-b3a95ea39da3d5377ac52d5666bd56fc7dacae2a5f77630c489471cf11578581 WatchSource:0}: Error finding container b3a95ea39da3d5377ac52d5666bd56fc7dacae2a5f77630c489471cf11578581: Status 404 returned error can't find the container with id b3a95ea39da3d5377ac52d5666bd56fc7dacae2a5f77630c489471cf11578581
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.879712 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.881013 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.881098 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.881176 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.881220 4703 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 30 11:56:05 crc kubenswrapper[4703]: E0130 11:56:05.881740 4703 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.250:6443: connect: connection refused" node="crc"
Jan 30 11:56:05 crc kubenswrapper[4703]: W0130 11:56:05.922454 4703 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused
Jan 30 11:56:05 crc kubenswrapper[4703]: E0130 11:56:05.922548 4703 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.129.56.250:6443: connect: connection refused" logger="UnhandledError"
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.979447 4703 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused
Jan 30 11:56:05 crc kubenswrapper[4703]: I0130 11:56:05.979531 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 07:01:15.659766602 +0000 UTC
Jan 30 11:56:06 crc kubenswrapper[4703]: I0130 11:56:06.089758 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0c04d8d7687969f07d819323281a48815ea1c3bd3319a4d6e815b9b41ed4e17d"}
Jan 30 11:56:06 crc kubenswrapper[4703]: I0130 11:56:06.091508 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"b3a95ea39da3d5377ac52d5666bd56fc7dacae2a5f77630c489471cf11578581"}
Jan 30 11:56:06 crc kubenswrapper[4703]: I0130 11:56:06.093115 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"507f45106e9d38bb2b420827a8770de4d1dd681645ed1e7d8d3555de6e5adf90"}
Jan 30 11:56:06 crc kubenswrapper[4703]: I0130 11:56:06.094200 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"d56e6a5f6200d5d89313c903f513a62eefede93a90dad1134e17d5d2142ddd93"}
Jan 30 11:56:06 crc kubenswrapper[4703]: I0130 11:56:06.095457 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"38d6a12b35f4336c3d39e809c7e77d6bed8804a8df8be7fcd95422b51ed7e5bc"}
Jan 30 11:56:06 crc kubenswrapper[4703]: W0130 11:56:06.332254 4703 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused
Jan 30 11:56:06 crc kubenswrapper[4703]: E0130 11:56:06.332712 4703 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.129.56.250:6443: connect: connection refused" logger="UnhandledError"
Jan 30 11:56:06 crc kubenswrapper[4703]: W0130 11:56:06.333555 4703 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused
Jan 30 11:56:06 crc kubenswrapper[4703]: E0130 11:56:06.333605 4703 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.129.56.250:6443: connect: connection refused" logger="UnhandledError"
Jan 30 11:56:06 crc kubenswrapper[4703]: E0130 11:56:06.382890 4703 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.250:6443: connect: connection refused" interval="1.6s"
Jan 30 11:56:06 crc kubenswrapper[4703]: W0130 11:56:06.454927 4703 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused
Jan 30 11:56:06 crc kubenswrapper[4703]: E0130 11:56:06.455104 4703 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.250:6443: connect: connection refused" logger="UnhandledError"
Jan 30 11:56:06 crc kubenswrapper[4703]: I0130 11:56:06.682537 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:06 crc kubenswrapper[4703]: I0130 11:56:06.684305 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:06 crc kubenswrapper[4703]: I0130 11:56:06.684341 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:06 crc kubenswrapper[4703]: I0130 11:56:06.684350 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:06 crc kubenswrapper[4703]: I0130 11:56:06.684380 4703 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 30 11:56:06 crc kubenswrapper[4703]: E0130 11:56:06.684748 4703 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.250:6443: connect: connection refused" node="crc"
Jan 30 11:56:06 crc kubenswrapper[4703]: I0130 11:56:06.979541 4703 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused
Jan 30 11:56:06 crc kubenswrapper[4703]: I0130 11:56:06.979638 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 17:22:18.364016249 +0000 UTC
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.017405 4703 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 30 11:56:07 crc kubenswrapper[4703]: E0130 11:56:07.019031 4703 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.129.56.250:6443: connect: connection refused" logger="UnhandledError"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.100065 4703 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3" exitCode=0
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.100151 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3"}
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.100299 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.102099 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.102172 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.102195 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.103397 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15"}
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.103878 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59"}
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.103902 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63"}
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.103917 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29"}
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.103976 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.107889 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.108856 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.108909 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.110348 4703 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2" exitCode=0
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.110418 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2"}
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.110442 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.111865 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.111927 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.111953 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.113240 4703 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="321c3cb914c8dc421b62be78bc5ad4fac126d2a68c2b17510a0fcf11fc84e0d2" exitCode=0
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.113341 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.113370 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"321c3cb914c8dc421b62be78bc5ad4fac126d2a68c2b17510a0fcf11fc84e0d2"}
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.114874 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.115115 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.115356 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.115888 4703 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="3e4862890012338bc9c73abe374c27de5319127544543a7a7bb6af992c287612" exitCode=0
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.115936 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"3e4862890012338bc9c73abe374c27de5319127544543a7a7bb6af992c287612"}
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.116056 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.117416 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.117471 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.117491 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.251260 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.252830 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.252856 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.252867 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.979505 4703 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused
Jan 30 11:56:07 crc kubenswrapper[4703]: I0130 11:56:07.980696 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 12:41:09.867541189 +0000 UTC
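Note how each certificate_manager.go line reports the same expiration (2026-02-24 05:53:03) but a different rotation deadline: the deadline is re-jittered on each evaluation. A sketch of that behavior, assuming (as an approximation of client-go's certificate manager, not a verbatim copy) a deadline drawn uniformly between 70% and 90% of the way through the certificate's validity:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a random point in roughly the [70%, 90%] window of
// the certificate's validity. The jitter is why the log prints a different
// deadline each time the manager re-evaluates (an assumption for this sketch).
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	notAfter, _ := time.Parse(time.RFC3339, "2026-02-24T05:53:03Z")
	notBefore := notAfter.AddDate(-1, 0, 0) // hypothetical one-year certificate

	for i := 0; i < 3; i++ {
		fmt.Println("rotation deadline is", rotationDeadline(notBefore, notAfter))
	}
}
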
Jan 30 11:56:07 crc kubenswrapper[4703]: E0130 11:56:07.984296 4703 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.250:6443: connect: connection refused" interval="3.2s"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.121272 4703 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="7c5c24e73e0378455aef55023c1da4bc5434e9c613367b6ee81854001c56e070" exitCode=0
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.121348 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"7c5c24e73e0378455aef55023c1da4bc5434e9c613367b6ee81854001c56e070"}
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.121405 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.123007 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.123036 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.123046 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.126712 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"39eca81eb300b1b4d7a68731db92f76c91270b0bf49f7ae9bcf9643559bcb722"}
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.126747 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"bdf1201e129d6158cd5ad6dfb3e93f5ec2a5e75c738edd2dc3bd197e813d6ac5"}
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.126763 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"bb1699595c372168389cb480f8c41f41a23f856d321138a04599628f1d4e19cd"}
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.126767 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.127849 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.127881 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.127895 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.129806 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4"}
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.129918 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811"}
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.129943 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b"}
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.132263 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"ca4cdadd36f3e4479c6957f86bd28947800e8c499b7a0990f3e303e7970b2d89"}
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.132292 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.132300 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.133594 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.133639 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.133659 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.133779 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.133824 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.133834 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.285646 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.286653 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.286679 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.286691 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.286715 4703 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 30 11:56:08 crc kubenswrapper[4703]: E0130 11:56:08.287263 4703 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.250:6443: connect: connection refused" node="crc"
Jan 30 11:56:08 crc kubenswrapper[4703]: W0130 11:56:08.729455 4703 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused
Jan 30 11:56:08 crc kubenswrapper[4703]: E0130 11:56:08.729555 4703 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.129.56.250:6443: connect: connection refused" logger="UnhandledError"
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.979181 4703 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused
Jan 30 11:56:08 crc kubenswrapper[4703]: I0130 11:56:08.981295 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 00:19:33.122844188 +0000 UTC
Jan 30 11:56:09 crc kubenswrapper[4703]: W0130 11:56:09.109462 4703 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused
Jan 30 11:56:09 crc kubenswrapper[4703]: E0130 11:56:09.109550 4703 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.129.56.250:6443: connect: connection refused" logger="UnhandledError"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.138336 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f1a709792a4ecf89e5c018f68db5b3bfdcc3284f2606053f9f699e901994ab00"}
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.138401 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a"}
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.138495 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.139732 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.139765 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.139775 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.142074 4703 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="b8073aa1ab6fa6059ef8e0166bd98c5093e50995057da89705fbe84930521dac" exitCode=0
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.142189 4703 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.142233 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.142809 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.143069 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"b8073aa1ab6fa6059ef8e0166bd98c5093e50995057da89705fbe84930521dac"}
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.143143 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.143802 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.143827 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.143839 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.144428 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.144450 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.144458 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.144764 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.144781 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.144789 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.274853 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.275003 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.275166 4703 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body=
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.275218 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.276010 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.276043 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.276053 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:09 crc kubenswrapper[4703]: W0130 11:56:09.401046 4703 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused
Jan 30 11:56:09 crc kubenswrapper[4703]: E0130 11:56:09.401146 4703 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.129.56.250:6443: connect: connection refused" logger="UnhandledError"
Jan 30 11:56:09 crc kubenswrapper[4703]: W0130 11:56:09.552199 4703 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused
Jan 30 11:56:09 crc kubenswrapper[4703]: E0130 11:56:09.552290 4703 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.250:6443: connect: connection refused" logger="UnhandledError"
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.979548 4703 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused
Jan 30 11:56:09 crc kubenswrapper[4703]: I0130 11:56:09.982255 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 22:58:36.105359038 +0000 UTC
Jan 30 11:56:10 crc kubenswrapper[4703]: I0130 11:56:10.149240 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"3d969f6a449492f80217365d4fef8a9e7c302491081301eac57b30ed0ea6ac65"}
Jan 30 11:56:10 crc kubenswrapper[4703]: I0130 11:56:10.149282 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2abd05b4fe2583b0862c3a746dfe5f395f579562e27fb2c5a5b9e45f15683b12"}
Jan 30 11:56:10 crc kubenswrapper[4703]: I0130 11:56:10.149297 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ce7a3c6cecb06c3b1c48c12feaf2c5afb8df7b18a2bdf2749ebaac2c4398952a"}
Jan 30 11:56:10 crc kubenswrapper[4703]: I0130 11:56:10.149309 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c96c2d502863efd645a409e26825cee60ad08f0993460e694008488a07f1cb06"}
Jan 30 11:56:10 crc kubenswrapper[4703]: I0130 11:56:10.149317 4703 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 30 11:56:10 crc kubenswrapper[4703]: I0130 11:56:10.149371 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:10 crc kubenswrapper[4703]: I0130 11:56:10.150309 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:10 crc kubenswrapper[4703]: I0130 11:56:10.150345 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:10 crc kubenswrapper[4703]: I0130 11:56:10.150355 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:10 crc kubenswrapper[4703]: I0130 11:56:10.978695 4703 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.250:6443: connect: connection refused
Jan 30 11:56:10 crc kubenswrapper[4703]: I0130 11:56:10.983219 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 11:26:52.813761254 +0000 UTC
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.153115 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.154575 4703 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f1a709792a4ecf89e5c018f68db5b3bfdcc3284f2606053f9f699e901994ab00" exitCode=255
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.154650 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"f1a709792a4ecf89e5c018f68db5b3bfdcc3284f2606053f9f699e901994ab00"}
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.154787 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.155536 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.155565 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.155575 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.156113 4703 scope.go:117] "RemoveContainer" containerID="f1a709792a4ecf89e5c018f68db5b3bfdcc3284f2606053f9f699e901994ab00"
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.161491 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"3cfbb3957994fbace415cea1e8381c0ec9ab6c036fe32768c63e957635694ccc"}
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.161593 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.162523 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.162553 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.162563 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:11 crc kubenswrapper[4703]: E0130 11:56:11.185473 4703 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.250:6443: connect: connection refused" interval="6.4s"
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.369409 4703 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 30 11:56:11 crc kubenswrapper[4703]: E0130 11:56:11.370543 4703 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.129.56.250:6443: connect: connection refused" logger="UnhandledError"
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.487382 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.488731 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.488766 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.488776 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.488796 4703 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 30 11:56:11 crc kubenswrapper[4703]: E0130 11:56:11.489297 4703 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.250:6443: connect: connection refused" node="crc"
Jan 30 11:56:11 crc kubenswrapper[4703]: I0130 11:56:11.983340 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 12:05:41.467082845 +0000 UTC
Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.007643 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.166912 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
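The kube-apiserver-client-kubelet entries above show certificate rotation blocked because the CSR POST is refused. A sketch of creating such a CertificateSigningRequest with client-go, assuming a placeholder kubeconfig path and an elided PEM body:

package main

import (
	"context"
	"fmt"

	certsv1 "k8s.io/api/certificates/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/kubelet/kubeconfig") // placeholder path
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// csrPEM would be a PEM-encoded PKCS#10 request; elided in this sketch.
	var csrPEM []byte

	csr := &certsv1.CertificateSigningRequest{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "csr-"},
		Spec: certsv1.CertificateSigningRequestSpec{
			Request:    csrPEM,
			SignerName: "kubernetes.io/kube-apiserver-client-kubelet",
			Usages:     []certsv1.KeyUsage{certsv1.UsageClientAuth},
		},
	}

	// This is the POST to /apis/certificates.k8s.io/v1/certificatesigningrequests
	// that the log shows failing with "connection refused".
	if _, err := client.CertificatesV1().CertificateSigningRequests().Create(context.TODO(), csr, metav1.CreateOptions{}); err != nil {
		fmt.Println("cannot create certificate signing request:", err)
	}
}
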
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771"} Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.168694 4703 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.168717 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.168755 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.169630 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.169659 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.169670 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.169677 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.169691 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.169698 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.296146 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.376096 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.376309 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.377331 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.377361 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.377372 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.623578 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Jan 30 11:56:12 crc kubenswrapper[4703]: I0130 11:56:12.983831 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 05:29:39.481211812 +0000 UTC Jan 30 11:56:13 crc kubenswrapper[4703]: I0130 11:56:13.170049 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 11:56:13 crc kubenswrapper[4703]: I0130 11:56:13.172403 4703 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 30 11:56:13 crc kubenswrapper[4703]: I0130 11:56:13.172460 4703 
kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:13 crc kubenswrapper[4703]: I0130 11:56:13.172480 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:13 crc kubenswrapper[4703]: I0130 11:56:13.172425 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:13 crc kubenswrapper[4703]: I0130 11:56:13.174163 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:13 crc kubenswrapper[4703]: I0130 11:56:13.174235 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:13 crc kubenswrapper[4703]: I0130 11:56:13.174286 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:13 crc kubenswrapper[4703]: I0130 11:56:13.174241 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:13 crc kubenswrapper[4703]: I0130 11:56:13.174310 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:13 crc kubenswrapper[4703]: I0130 11:56:13.174333 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:13 crc kubenswrapper[4703]: I0130 11:56:13.174540 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:13 crc kubenswrapper[4703]: I0130 11:56:13.174579 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:13 crc kubenswrapper[4703]: I0130 11:56:13.174591 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:13 crc kubenswrapper[4703]: I0130 11:56:13.622250 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:13 crc kubenswrapper[4703]: I0130 11:56:13.984898 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 05:15:18.19908233 +0000 UTC Jan 30 11:56:14 crc kubenswrapper[4703]: I0130 11:56:14.175710 4703 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 30 11:56:14 crc kubenswrapper[4703]: I0130 11:56:14.175869 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:14 crc kubenswrapper[4703]: I0130 11:56:14.177270 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:14 crc kubenswrapper[4703]: I0130 11:56:14.177326 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:14 crc kubenswrapper[4703]: I0130 11:56:14.177340 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:14 crc kubenswrapper[4703]: I0130 11:56:14.378435 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 11:56:14 crc kubenswrapper[4703]: I0130 11:56:14.378879 4703 kubelet_node_status.go:401] "Setting node annotation to enable 
volume controller attach/detach" Jan 30 11:56:14 crc kubenswrapper[4703]: I0130 11:56:14.380671 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:14 crc kubenswrapper[4703]: I0130 11:56:14.380708 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:14 crc kubenswrapper[4703]: I0130 11:56:14.380719 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:14 crc kubenswrapper[4703]: I0130 11:56:14.853534 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:14 crc kubenswrapper[4703]: I0130 11:56:14.951305 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 11:56:14 crc kubenswrapper[4703]: I0130 11:56:14.951614 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:14 crc kubenswrapper[4703]: I0130 11:56:14.952954 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:14 crc kubenswrapper[4703]: I0130 11:56:14.953012 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:14 crc kubenswrapper[4703]: I0130 11:56:14.953024 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:14 crc kubenswrapper[4703]: I0130 11:56:14.985568 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 22:48:11.899542731 +0000 UTC Jan 30 11:56:15 crc kubenswrapper[4703]: E0130 11:56:15.175169 4703 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 30 11:56:15 crc kubenswrapper[4703]: I0130 11:56:15.177574 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:15 crc kubenswrapper[4703]: I0130 11:56:15.181364 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:15 crc kubenswrapper[4703]: I0130 11:56:15.181459 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:15 crc kubenswrapper[4703]: I0130 11:56:15.181488 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:15 crc kubenswrapper[4703]: I0130 11:56:15.523505 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Jan 30 11:56:15 crc kubenswrapper[4703]: I0130 11:56:15.523742 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:15 crc kubenswrapper[4703]: I0130 11:56:15.524988 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:15 crc kubenswrapper[4703]: I0130 11:56:15.525037 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:15 crc kubenswrapper[4703]: I0130 11:56:15.525050 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
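Until the repeated "Attempting to register node" POST succeeds, components like the eviction manager above fail with "node \"crc\" not found". A minimal sketch of the registration call itself via client-go (placeholder kubeconfig path; the real kubelet also attaches labels, addresses, and capacity to the Node object):

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/kubelet/kubeconfig") // placeholder
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "crc"}}

	// The POST to /api/v1/nodes that the log shows being refused; until it
	// succeeds, lookups of node "crc" by other kubelet components fail.
	if _, err := client.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil {
		fmt.Println("Unable to register node with API server:", err)
	}
}
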
event="NodeHasSufficientPID" Jan 30 11:56:15 crc kubenswrapper[4703]: I0130 11:56:15.986179 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 01:32:17.966992412 +0000 UTC Jan 30 11:56:16 crc kubenswrapper[4703]: I0130 11:56:16.170464 4703 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 30 11:56:16 crc kubenswrapper[4703]: I0130 11:56:16.170613 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 30 11:56:16 crc kubenswrapper[4703]: I0130 11:56:16.179263 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:16 crc kubenswrapper[4703]: I0130 11:56:16.180089 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:16 crc kubenswrapper[4703]: I0130 11:56:16.180141 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:16 crc kubenswrapper[4703]: I0130 11:56:16.180154 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:16 crc kubenswrapper[4703]: I0130 11:56:16.986928 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 05:11:00.661622631 +0000 UTC Jan 30 11:56:17 crc kubenswrapper[4703]: I0130 11:56:17.890320 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:17 crc kubenswrapper[4703]: I0130 11:56:17.891925 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:17 crc kubenswrapper[4703]: I0130 11:56:17.891954 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:17 crc kubenswrapper[4703]: I0130 11:56:17.891963 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:17 crc kubenswrapper[4703]: I0130 11:56:17.891986 4703 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 30 11:56:17 crc kubenswrapper[4703]: I0130 11:56:17.987237 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 17:59:53.995210637 +0000 UTC Jan 30 11:56:18 crc kubenswrapper[4703]: I0130 11:56:18.991480 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 03:35:49.255720035 +0000 UTC Jan 30 11:56:19 crc kubenswrapper[4703]: I0130 11:56:19.280315 4703 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed 
with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 30 11:56:19 crc kubenswrapper[4703]: I0130 11:56:19.280409 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 30 11:56:19 crc kubenswrapper[4703]: I0130 11:56:19.285421 4703 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 30 11:56:19 crc kubenswrapper[4703]: I0130 11:56:19.285504 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 30 11:56:19 crc kubenswrapper[4703]: I0130 11:56:19.291840 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 11:56:19 crc kubenswrapper[4703]: I0130 11:56:19.292020 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:19 crc kubenswrapper[4703]: I0130 11:56:19.294613 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:19 crc kubenswrapper[4703]: I0130 11:56:19.294657 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:19 crc kubenswrapper[4703]: I0130 11:56:19.294670 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:19 crc kubenswrapper[4703]: I0130 11:56:19.297917 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 11:56:19 crc kubenswrapper[4703]: I0130 11:56:19.373872 4703 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 30 11:56:19 crc kubenswrapper[4703]: I0130 11:56:19.992436 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 09:35:50.827891971 +0000 UTC Jan 30 11:56:20 crc kubenswrapper[4703]: I0130 11:56:20.190412 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:20 crc kubenswrapper[4703]: I0130 11:56:20.191538 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:20 crc kubenswrapper[4703]: I0130 11:56:20.191635 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:20 crc kubenswrapper[4703]: I0130 11:56:20.191703 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:20 crc kubenswrapper[4703]: I0130 
11:56:20.993974 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 01:03:35.604715792 +0000 UTC Jan 30 11:56:21 crc kubenswrapper[4703]: I0130 11:56:21.994712 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 21:50:15.277329043 +0000 UTC Jan 30 11:56:22 crc kubenswrapper[4703]: I0130 11:56:22.008556 4703 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 30 11:56:22 crc kubenswrapper[4703]: I0130 11:56:22.008614 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 30 11:56:22 crc kubenswrapper[4703]: I0130 11:56:22.658889 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 30 11:56:22 crc kubenswrapper[4703]: I0130 11:56:22.659183 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:22 crc kubenswrapper[4703]: I0130 11:56:22.660541 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:22 crc kubenswrapper[4703]: I0130 11:56:22.660627 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:22 crc kubenswrapper[4703]: I0130 11:56:22.660653 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:22 crc kubenswrapper[4703]: I0130 11:56:22.675650 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 30 11:56:22 crc kubenswrapper[4703]: I0130 11:56:22.994839 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 22:44:52.112509691 +0000 UTC Jan 30 11:56:23 crc kubenswrapper[4703]: I0130 11:56:23.199734 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:23 crc kubenswrapper[4703]: I0130 11:56:23.200668 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:23 crc kubenswrapper[4703]: I0130 11:56:23.200706 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:23 crc kubenswrapper[4703]: I0130 11:56:23.200719 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:23 crc kubenswrapper[4703]: I0130 11:56:23.629847 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:23 crc kubenswrapper[4703]: I0130 11:56:23.630007 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:23 crc kubenswrapper[4703]: I0130 11:56:23.630437 
4703 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 30 11:56:23 crc kubenswrapper[4703]: I0130 11:56:23.630494 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 30 11:56:23 crc kubenswrapper[4703]: I0130 11:56:23.631484 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:23 crc kubenswrapper[4703]: I0130 11:56:23.631537 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:23 crc kubenswrapper[4703]: I0130 11:56:23.631551 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:23 crc kubenswrapper[4703]: I0130 11:56:23.635292 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:23 crc kubenswrapper[4703]: I0130 11:56:23.995801 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 18:22:16.939252572 +0000 UTC Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.202152 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.202792 4703 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.202983 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.203112 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.203236 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.203319 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.281347 4703 trace.go:236] Trace[758597922]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (30-Jan-2026 11:56:12.492) (total time: 11787ms): Jan 30 11:56:24 crc kubenswrapper[4703]: Trace[758597922]: ---"Objects listed" error: 11787ms (11:56:24.279) Jan 30 11:56:24 crc kubenswrapper[4703]: Trace[758597922]: [11.787387899s] [11.787387899s] END Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 
11:56:24.281421 4703 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 30 11:56:24 crc kubenswrapper[4703]: E0130 11:56:24.283454 4703 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.284596 4703 trace.go:236] Trace[567900332]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (30-Jan-2026 11:56:13.448) (total time: 10835ms): Jan 30 11:56:24 crc kubenswrapper[4703]: Trace[567900332]: ---"Objects listed" error: 10835ms (11:56:24.284) Jan 30 11:56:24 crc kubenswrapper[4703]: Trace[567900332]: [10.835649505s] [10.835649505s] END Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.284626 4703 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.286106 4703 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.286765 4703 trace.go:236] Trace[366880391]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (30-Jan-2026 11:56:13.166) (total time: 11119ms): Jan 30 11:56:24 crc kubenswrapper[4703]: Trace[366880391]: ---"Objects listed" error: 11119ms (11:56:24.286) Jan 30 11:56:24 crc kubenswrapper[4703]: Trace[366880391]: [11.119986642s] [11.119986642s] END Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.286794 4703 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.385772 4703 trace.go:236] Trace[1762346664]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (30-Jan-2026 11:56:13.349) (total time: 11035ms): Jan 30 11:56:24 crc kubenswrapper[4703]: Trace[1762346664]: ---"Objects listed" error: 11035ms (11:56:24.385) Jan 30 11:56:24 crc kubenswrapper[4703]: Trace[1762346664]: [11.035718838s] [11.035718838s] END Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.385806 4703 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.424540 4703 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.509476 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.513035 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.853809 4703 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.853881 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get 
\"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.974937 4703 apiserver.go:52] "Watching apiserver" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.981405 4703 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.981598 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.981949 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.982405 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:24 crc kubenswrapper[4703]: E0130 11:56:24.982463 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.982556 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:24 crc kubenswrapper[4703]: E0130 11:56:24.982589 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.983183 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.983986 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.984471 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:24 crc kubenswrapper[4703]: E0130 11:56:24.984512 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.985046 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.985102 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.985202 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.985729 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.985958 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.986032 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.986232 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.986390 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.986677 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.987873 4703 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.990910 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.990954 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.990973 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.990992 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.991016 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.991035 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.991051 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.991070 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.991087 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.991103 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.992086 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.992497 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.992860 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.993273 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.993454 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.993967 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.994211 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.994357 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.994389 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.994411 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.994430 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.994450 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" 
(UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.994466 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.994483 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.994732 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.995040 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.995519 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.995786 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.995958 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.996144 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.996330 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.996739 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.997029 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:24 crc kubenswrapper[4703]: I0130 11:56:24.999672 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:24.999941 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:56:25.49989623 +0000 UTC m=+21.277717884 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.001318 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.001370 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.001397 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.001415 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.002686 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.003189 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.004436 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.004928 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.004997 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.005044 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.005335 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.005427 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.005568 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.005631 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.005675 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.005704 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.005723 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.005751 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.006213 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.006699 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.006723 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.006416 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.006575 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). 
InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007185 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007209 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007228 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007244 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007264 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007286 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007303 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007321 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007341 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007356 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod 
\"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007376 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007397 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007417 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007433 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007452 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007472 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007489 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007507 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007528 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007547 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007564 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007582 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007601 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007618 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007646 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007686 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007709 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007731 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007753 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007772 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: 
\"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007787 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007809 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007829 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007848 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007863 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007882 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007901 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007919 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007941 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.007975 4703 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008001 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008018 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008036 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008054 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008074 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008091 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008109 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008145 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008162 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008180 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008198 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008215 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008233 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008255 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008274 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008293 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008311 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008329 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008364 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008403 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008419 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008438 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008456 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008477 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008498 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008516 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008536 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008553 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008571 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008589 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008607 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008627 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008646 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008663 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008683 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008704 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008723 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008752 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
\"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008770 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008795 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008813 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008833 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008851 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008868 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008885 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008903 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008920 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008936 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008955 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008973 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.008990 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.009009 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.009028 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.009046 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.009064 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.009084 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.009102 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.009133 4703 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.009375 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.009576 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.009741 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.010513 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.010700 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.011215 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.011496 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.011740 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.011942 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.012223 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.012557 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.012821 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.013110 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.013428 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.013629 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.013783 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.013837 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.014012 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.014077 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.014308 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.014307 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.014545 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.014737 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.015550 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.015679 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.015724 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.015768 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.015802 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.016018 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.016445 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.016522 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.016876 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.017948 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.018406 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.018835 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.018843 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.018883 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.018943 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.018976 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019008 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019033 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019064 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019107 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019194 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019228 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019258 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019284 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019312 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019342 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019370 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019399 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019434 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019459 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019473 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019507 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019534 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019560 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019588 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019620 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019647 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019685 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019768 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019812 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: 
\"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019857 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019897 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019936 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.019972 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020009 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020044 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020084 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020150 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020189 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020267 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020318 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020386 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020427 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020465 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020513 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020543 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020579 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020606 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020631 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020679 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020719 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020751 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020787 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.020919 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.021151 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.028019 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.028639 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.028688 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.032216 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.032796 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.034043 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.034085 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.034333 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.034846 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.035271 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.035959 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.036508 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.051554 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.096696 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.116280 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.116377 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.116399 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.116424 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.117598 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.118629 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.118808 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.118811 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.118943 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.119083 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.119193 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.119241 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.119368 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). 
InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.119506 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.120097 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.121656 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.125248 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.125342 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.125397 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.120343 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.141303 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). 
InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.141522 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.141664 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.141792 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.141944 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.142067 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.142223 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.142298 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.142478 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.142679 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.143016 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.143216 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.143282 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.143384 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.143517 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.144043 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.144145 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.144179 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.144512 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.146409 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.146432 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.146658 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.147096 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.147593 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.148042 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.148498 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.152318 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.152521 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.152688 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.152962 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.153516 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.153886 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.154285 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.154591 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.154835 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.155067 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.155297 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.155594 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.155966 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.156230 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.157229 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.157593 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.157868 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.158068 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.158282 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.158545 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.158862 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.159148 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.159575 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.159844 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.160475 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.160593 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.160924 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.161093 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.161360 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.161311 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.161689 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.161853 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.162093 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.162134 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 13:18:22.124905601 +0000 UTC Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.162404 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.162959 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.163210 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.163702 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.163781 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.163860 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.164195 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.164377 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.164829 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.164994 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.116448 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165113 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165166 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165189 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165209 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165230 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" 
(UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165256 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165277 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165340 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165367 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165390 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165414 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165444 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165470 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165498 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: 
\"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165522 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165573 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165601 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165628 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165656 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165685 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165710 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165848 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165870 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165883 4703 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165897 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165910 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165921 4703 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165935 4703 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165948 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165960 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165973 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165985 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.165998 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166009 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166021 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166035 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: 
\"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166046 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166058 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166070 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166083 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166096 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166108 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166137 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166151 4703 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166163 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166176 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166187 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166199 4703 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166210 4703 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166221 4703 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166235 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166247 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166259 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166269 4703 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166281 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166293 4703 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166304 4703 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166316 4703 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166327 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166339 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166350 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166362 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166374 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166386 4703 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166398 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166410 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166424 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166436 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166449 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166460 4703 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166473 4703 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166484 4703 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166496 4703 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166508 4703 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166519 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166531 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166543 4703 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166555 4703 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166567 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166579 4703 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166592 4703 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166604 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166616 4703 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166627 4703 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166639 4703 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166651 4703 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166665 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166676 4703 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: 
\"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166688 4703 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166700 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166712 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166723 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166735 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166747 4703 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166758 4703 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166759 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.167079 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.166771 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.167492 4703 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.168510 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.169036 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.169726 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.169920 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.170671 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.171068 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.171408 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.172092 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.172513 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.173027 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.173403 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.173869 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.174703 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.174996 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.175089 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.175225 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.175317 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.175424 4703 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.175590 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:25.675569242 +0000 UTC m=+21.453390896 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.175742 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.175964 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.175957 4703 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.175977 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.176173 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.176317 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:25.676249441 +0000 UTC m=+21.454071095 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.176362 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.176487 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.176787 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.177209 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.177486 4703 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.177513 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.177526 4703 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.177536 4703 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.177546 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.177557 4703 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.177570 4703 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.177581 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.177593 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.177603 4703 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.177612 4703 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.177622 4703 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on 
node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.177631 4703 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.181594 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.177223 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.178901 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.180578 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.180887 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.181119 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.121814 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.181383 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.181554 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.182157 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.183098 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.186061 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.186457 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.187715 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.189858 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.194500 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.194551 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.194570 4703 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.194655 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:25.694630318 +0000 UTC m=+21.472451972 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.197800 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.197843 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.197857 4703 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.197912 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:25.697893087 +0000 UTC m=+21.475714741 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.197950 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.203627 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.203726 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.204410 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.205974 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.206999 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.207694 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.208170 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.209001 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.209504 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.210310 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.210532 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). 
InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.211148 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.212596 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.213011 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.214035 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.215139 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.217077 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.217553 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.218375 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.218872 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.219449 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.220554 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.221187 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.238958 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.239916 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.240642 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.247676 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.250319 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.250820 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-cont
roller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.252525 4703 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771" exitCode=255 Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.252930 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771"} Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.253024 4703 scope.go:117] "RemoveContainer" containerID="f1a709792a4ecf89e5c018f68db5b3bfdcc3284f2606053f9f699e901994ab00" Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.260712 4703 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.261099 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.265643 4703 scope.go:117] "RemoveContainer" containerID="173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771" Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.266001 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.266214 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.266526 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.277286 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278277 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278340 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278407 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278420 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278429 4703 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278437 4703 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278446 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278456 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278465 4703 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278476 4703 
reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278528 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278544 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278556 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278565 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278574 4703 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278582 4703 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278591 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278603 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278612 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278621 4703 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278630 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278639 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278648 
4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278657 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278679 4703 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278691 4703 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278703 4703 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278715 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278727 4703 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278739 4703 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278749 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278761 4703 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278770 4703 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278781 4703 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278791 4703 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 
crc kubenswrapper[4703]: I0130 11:56:25.278800 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278808 4703 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278816 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278825 4703 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278833 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278841 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278849 4703 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278858 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.278998 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279102 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279112 4703 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279134 4703 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279142 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 
11:56:25.279150 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279159 4703 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279169 4703 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279201 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279197 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279270 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279281 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279289 4703 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279298 4703 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279307 4703 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279318 4703 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279326 4703 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279334 4703 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" 
(UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279343 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279369 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279410 4703 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279420 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279430 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279438 4703 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279447 4703 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279455 4703 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279463 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279471 4703 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279479 4703 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279487 4703 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279497 4703 
reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279505 4703 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279513 4703 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279522 4703 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279530 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279538 4703 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279547 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279556 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279565 4703 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279573 4703 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279581 4703 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279589 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279598 4703 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 
11:56:25.279609 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279617 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279624 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279633 4703 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279642 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279650 4703 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279659 4703 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279668 4703 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279677 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279685 4703 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279693 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279701 4703 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279709 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279717 
4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279725 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279733 4703 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279741 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279751 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279760 4703 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279770 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279779 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279788 4703 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279798 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.279152 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 30 11:56:25 crc kubenswrapper[4703]: W0130 11:56:25.281728 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-2ea546dc9634d0842584b69c5c67be9608da4afaffb63eda4c9205a80cfec61b WatchSource:0}: Error finding container 2ea546dc9634d0842584b69c5c67be9608da4afaffb63eda4c9205a80cfec61b: Status 404 returned error can't find the container with id 
2ea546dc9634d0842584b69c5c67be9608da4afaffb63eda4c9205a80cfec61b Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.287387 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.298595 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.308085 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.318158 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.330895 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.333801 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.341985 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.353732 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: W0130 11:56:25.357944 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-35e79cebcf3ac4a754f3ecf23b30cfcc66ed1da28d2e1536ff5adf7dc326ff3b WatchSource:0}: Error finding container 35e79cebcf3ac4a754f3ecf23b30cfcc66ed1da28d2e1536ff5adf7dc326ff3b: Status 404 returned error can't find the container with id 35e79cebcf3ac4a754f3ecf23b30cfcc66ed1da28d2e1536ff5adf7dc326ff3b Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.364823 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.375763 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.385264 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.396769 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1a709792a4ecf89e5c018f68db5b3bfdcc3284f2606053f9f699e901994ab00\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:10Z\\\",\\\"message\\\":\\\"W0130 11:56:09.704416 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0130 
11:56:09.705415 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769774169 cert, and key in /tmp/serving-cert-2795561194/serving-signer.crt, /tmp/serving-cert-2795561194/serving-signer.key\\\\nI0130 11:56:10.393819 1 observer_polling.go:159] Starting file observer\\\\nW0130 11:56:10.394859 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0130 11:56:10.522843 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:10.523662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2795561194/tls.crt::/tmp/serving-cert-2795561194/tls.key\\\\\\\"\\\\nF0130 11:56:10.935958 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.408409 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.419366 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.431232 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.443595 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.455994 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.469878 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1a709792a4ecf89e5c018f68db5b3bfdcc3284f2606053f9f699e901994ab00\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:10Z\\\",\\\"message\\\":\\\"W0130 11:56:09.704416 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0130 
11:56:09.705415 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769774169 cert, and key in /tmp/serving-cert-2795561194/serving-signer.crt, /tmp/serving-cert-2795561194/serving-signer.key\\\\nI0130 11:56:10.393819 1 observer_polling.go:159] Starting file observer\\\\nW0130 11:56:10.394859 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0130 11:56:10.522843 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:10.523662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2795561194/tls.crt::/tmp/serving-cert-2795561194/tls.key\\\\\\\"\\\\nF0130 11:56:10.935958 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.479773 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.491384 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.505165 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.574584 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.581929 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.582153 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:56:26.582084188 +0000 UTC m=+22.359905862 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:56:25 crc kubenswrapper[4703]: W0130 11:56:25.614586 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-76b33b55379771594b99ee68b7371ba24c421bdf80137205a1666cf92b6631a6 WatchSource:0}: Error finding container 76b33b55379771594b99ee68b7371ba24c421bdf80137205a1666cf92b6631a6: Status 404 returned error can't find the container with id 76b33b55379771594b99ee68b7371ba24c421bdf80137205a1666cf92b6631a6 Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.682898 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.682968 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.683092 4703 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.683209 4703 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.683239 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2026-01-30 11:56:26.683215045 +0000 UTC m=+22.461036869 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.683291 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:26.683271207 +0000 UTC m=+22.461092861 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.783758 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:25 crc kubenswrapper[4703]: I0130 11:56:25.783839 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.783964 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.784025 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.784041 4703 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.784093 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:26.784077635 +0000 UTC m=+22.561899289 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.784281 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.784329 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.784351 4703 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:25 crc kubenswrapper[4703]: E0130 11:56:25.784472 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:26.784445145 +0000 UTC m=+22.562266819 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.085955 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:26 crc kubenswrapper[4703]: E0130 11:56:26.086145 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
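The sandbox error above (`no CNI configuration file in /etc/kubernetes/cni/net.d/`) blocks every new pod sandbox until the network operator writes a CNI config onto the node. A small Go sketch of the same presence check; the directory comes verbatim from the message, while the `.conf`/`.conflist`/`.json` extensions are the conventional ones CNI runtimes scan for (an assumption for this runtime):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// Path taken verbatim from the kubelet error above; cri-o on this node is
// configured to look here rather than the default /etc/cni/net.d.
const cniDir = "/etc/kubernetes/cni/net.d"

func main() {
	var matches []string
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		m, err := filepath.Glob(filepath.Join(cniDir, pat))
		if err == nil {
			matches = append(matches, m...)
		}
	}
	if len(matches) == 0 {
		fmt.Println("no CNI config present; NetworkReady stays false and sandboxes cannot start")
		os.Exit(1)
	}
	for _, f := range matches {
		fmt.Println("found:", f)
	}
}
```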
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.162690 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 14:10:32.580035285 +0000 UTC Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.256829 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"76b33b55379771594b99ee68b7371ba24c421bdf80137205a1666cf92b6631a6"} Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.257957 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113"} Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.258024 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"35e79cebcf3ac4a754f3ecf23b30cfcc66ed1da28d2e1536ff5adf7dc326ff3b"} Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.259598 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae"} Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.259673 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e"} Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.259697 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"2ea546dc9634d0842584b69c5c67be9608da4afaffb63eda4c9205a80cfec61b"} Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.261140 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.264431 4703 scope.go:117] "RemoveContainer" containerID="173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771" Jan 30 11:56:26 crc kubenswrapper[4703]: E0130 11:56:26.264557 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.273260 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.286306 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.295713 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.304671 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.314615 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.323576 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.340389 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1a709792a4ecf89e5c018f68db5b3bfdcc3284f2606053f9f699e901994ab00\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:10Z\\\",\\\"message\\\":\\\"W0130 11:56:09.704416 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI0130 
11:56:09.705415 1 crypto.go:601] Generating new CA for check-endpoints-signer@1769774169 cert, and key in /tmp/serving-cert-2795561194/serving-signer.crt, /tmp/serving-cert-2795561194/serving-signer.key\\\\nI0130 11:56:10.393819 1 observer_polling.go:159] Starting file observer\\\\nW0130 11:56:10.394859 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI0130 11:56:10.522843 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:10.523662 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2795561194/tls.crt::/tmp/serving-cert-2795561194/tls.key\\\\\\\"\\\\nF0130 11:56:10.935958 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.364252 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.381724 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd
90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.396461 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.418723 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.432920 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.444646 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.458870 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.477345 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.488685 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.590417 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:56:26 crc kubenswrapper[4703]: E0130 11:56:26.590675 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:56:28.590641367 +0000 UTC m=+24.368463061 (durationBeforeRetry 2s). 
Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.590417 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:56:26 crc kubenswrapper[4703]: E0130 11:56:26.590675 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:56:28.590641367 +0000 UTC m=+24.368463061 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.690878 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.690926 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:56:26 crc kubenswrapper[4703]: E0130 11:56:26.691000 4703 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 30 11:56:26 crc kubenswrapper[4703]: E0130 11:56:26.691038 4703 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 30 11:56:26 crc kubenswrapper[4703]: E0130 11:56:26.691095 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:28.691076866 +0000 UTC m=+24.468898520 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
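The TearDown failure above ("driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers") is a registry lookup, not an RPC failure: after a kubelet restart, unmounts cannot proceed until the plugin re-registers over the plugin-registration socket. An illustrative sketch of that lookup pattern, not kubelet's actual csi package:

package main

import (
	"fmt"
	"sync"
)

type csiClient struct{ endpoint string }

// driverRegistry mimics the kubelet-side map of registered CSI plugins.
type driverRegistry struct {
	mu      sync.RWMutex
	drivers map[string]*csiClient
}

func (r *driverRegistry) Get(name string) (*csiClient, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	c, ok := r.drivers[name]
	if !ok {
		return nil, fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
	}
	return c, nil
}

func main() {
	reg := &driverRegistry{drivers: map[string]*csiClient{}}
	// The hostpath provisioner has not re-registered yet, so TearDown fails
	// and the operation is requeued with backoff, exactly as in the log.
	if _, err := reg.Get("kubevirt.io.hostpath-provisioner"); err != nil {
		fmt.Println("UnmountVolume.TearDown failed:", err)
	}
}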
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.791778 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:26 crc kubenswrapper[4703]: I0130 11:56:26.791842 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:26 crc kubenswrapper[4703]: E0130 11:56:26.791951 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 11:56:26 crc kubenswrapper[4703]: E0130 11:56:26.791966 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 11:56:26 crc kubenswrapper[4703]: E0130 11:56:26.791977 4703 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:26 crc kubenswrapper[4703]: E0130 11:56:26.792009 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 11:56:26 crc kubenswrapper[4703]: E0130 11:56:26.792059 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 11:56:26 crc kubenswrapper[4703]: E0130 11:56:26.792080 4703 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:26 crc kubenswrapper[4703]: E0130 11:56:26.792027 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:28.792014498 +0000 UTC m=+24.569836142 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:26 crc kubenswrapper[4703]: E0130 11:56:26.792196 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:28.792167862 +0000 UTC m=+24.569989556 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.085881 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.085927 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:27 crc kubenswrapper[4703]: E0130 11:56:27.086098 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:56:27 crc kubenswrapper[4703]: E0130 11:56:27.086254 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.091642 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.093283 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.095443 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.115449 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.116056 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.117150 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.117744 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.118746 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.119331 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.120221 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.120719 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.175593 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-28 19:22:58.697139771 +0000 UTC Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.178095 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.178645 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" 
path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.179586 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.180207 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.180782 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.181815 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.182523 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.183506 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.183974 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.184396 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.185461 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.185943 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.186988 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.187647 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.188502 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.189045 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" 
path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.189909 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.190472 4703 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 30 11:56:27 crc kubenswrapper[4703]: I0130 11:56:27.190577 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 30 11:56:28 crc kubenswrapper[4703]: I0130 11:56:28.085632 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:28 crc kubenswrapper[4703]: E0130 11:56:28.085775 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:56:28 crc kubenswrapper[4703]: I0130 11:56:28.176201 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 20:08:55.681944312 +0000 UTC Jan 30 11:56:28 crc kubenswrapper[4703]: I0130 11:56:28.754316 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:56:28 crc kubenswrapper[4703]: I0130 11:56:28.754408 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:28 crc kubenswrapper[4703]: I0130 11:56:28.754434 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:28 crc kubenswrapper[4703]: E0130 11:56:28.754622 4703 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 11:56:28 crc kubenswrapper[4703]: E0130 11:56:28.754672 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert 
Jan 30 11:56:28 crc kubenswrapper[4703]: E0130 11:56:28.754672 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:32.754655346 +0000 UTC m=+28.532477000 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 30 11:56:28 crc kubenswrapper[4703]: E0130 11:56:28.754962 4703 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 30 11:56:28 crc kubenswrapper[4703]: E0130 11:56:28.755062 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:56:32.755030946 +0000 UTC m=+28.532852620 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:56:28 crc kubenswrapper[4703]: E0130 11:56:28.755116 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:32.755104148 +0000 UTC m=+28.532925812 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 30 11:56:28 crc kubenswrapper[4703]: I0130 11:56:28.855633 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:56:28 crc kubenswrapper[4703]: I0130 11:56:28.855693 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:56:28 crc kubenswrapper[4703]: E0130 11:56:28.855826 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 30 11:56:28 crc kubenswrapper[4703]: E0130 11:56:28.855844 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 30 11:56:28 crc kubenswrapper[4703]: E0130 11:56:28.855854 4703 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 11:56:28 crc kubenswrapper[4703]: E0130 11:56:28.855890 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 30 11:56:28 crc kubenswrapper[4703]: E0130 11:56:28.855938 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 30 11:56:28 crc kubenswrapper[4703]: E0130 11:56:28.855952 4703 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:28 crc kubenswrapper[4703]: E0130 11:56:28.856034 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:32.856014139 +0000 UTC m=+28.633835803 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:29 crc kubenswrapper[4703]: I0130 11:56:29.086487 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:29 crc kubenswrapper[4703]: E0130 11:56:29.086703 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:56:29 crc kubenswrapper[4703]: I0130 11:56:29.087194 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:29 crc kubenswrapper[4703]: E0130 11:56:29.087244 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:56:29 crc kubenswrapper[4703]: I0130 11:56:29.177393 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 05:48:48.785887137 +0000 UTC Jan 30 11:56:30 crc kubenswrapper[4703]: I0130 11:56:30.086238 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:30 crc kubenswrapper[4703]: E0130 11:56:30.086489 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:56:30 crc kubenswrapper[4703]: I0130 11:56:30.178199 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 08:17:14.019332843 +0000 UTC Jan 30 11:56:31 crc kubenswrapper[4703]: I0130 11:56:31.089993 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:31 crc kubenswrapper[4703]: I0130 11:56:31.090020 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:31 crc kubenswrapper[4703]: E0130 11:56:31.090176 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:56:31 crc kubenswrapper[4703]: E0130 11:56:31.090345 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:56:31 crc kubenswrapper[4703]: I0130 11:56:31.178362 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 13:44:05.029024238 +0000 UTC Jan 30 11:56:31 crc kubenswrapper[4703]: I0130 11:56:31.285122 4703 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 30 11:56:31 crc kubenswrapper[4703]: I0130 11:56:31.292082 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:31 crc kubenswrapper[4703]: I0130 11:56:31.292150 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:31 crc kubenswrapper[4703]: I0130 11:56:31.292165 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:31 crc kubenswrapper[4703]: I0130 11:56:31.292249 4703 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 30 11:56:31 crc kubenswrapper[4703]: I0130 11:56:31.540793 4703 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 30 11:56:31 crc kubenswrapper[4703]: I0130 11:56:31.541446 4703 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 30 11:56:31 crc kubenswrapper[4703]: I0130 11:56:31.543023 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:31 crc kubenswrapper[4703]: I0130 11:56:31.543074 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:31 crc kubenswrapper[4703]: I0130 11:56:31.543084 4703 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:31 crc kubenswrapper[4703]: I0130 11:56:31.543103 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:31 crc kubenswrapper[4703]: I0130 11:56:31.543113 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:31Z","lastTransitionTime":"2026-01-30T11:56:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:31 crc kubenswrapper[4703]: I0130 11:56:31.636340 4703 csr.go:261] certificate signing request csr-xggqg is approved, waiting to be issued Jan 30 11:56:31 crc kubenswrapper[4703]: I0130 11:56:31.817840 4703 csr.go:257] certificate signing request csr-xggqg is issued Jan 30 11:56:31 crc kubenswrapper[4703]: E0130 11:56:31.889297 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:31Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:31Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.008187 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.009947 4703 scope.go:117] "RemoveContainer" 
containerID="173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771" Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.010313 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.052580 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.052923 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.053042 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.053150 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.053256 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:32Z","lastTransitionTime":"2026-01-30T11:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.077282 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:32Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.081480 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.081514 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.081524 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.081542 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.081551 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:32Z","lastTransitionTime":"2026-01-30T11:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.085223 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.085340 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.098379 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:32Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.114552 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.114608 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.114621 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.114640 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.114651 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:32Z","lastTransitionTime":"2026-01-30T11:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.136717 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:32Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.141626 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.141671 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.141684 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.141706 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.141720 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:32Z","lastTransitionTime":"2026-01-30T11:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.160208 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:32Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.160332 4703 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.162091 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.162263 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.162341 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.162440 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.162552 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:32Z","lastTransitionTime":"2026-01-30T11:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.179193 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 01:06:22.113468422 +0000 UTC Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.317697 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.317731 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.317741 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.317755 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.317765 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:32Z","lastTransitionTime":"2026-01-30T11:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.419602 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.419653 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.419668 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.419687 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.419700 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:32Z","lastTransitionTime":"2026-01-30T11:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.442629 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-l8kf2"] Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.443013 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-l8kf2" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.446743 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.447988 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.448405 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.477937 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:32Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.488160 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:32Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.503888 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e2
7753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:32Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.518415 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdj8x\" (UniqueName: \"kubernetes.io/projected/9d1378c4-1c78-4865-9168-822ad86bae15-kube-api-access-hdj8x\") pod \"node-resolver-l8kf2\" (UID: \"9d1378c4-1c78-4865-9168-822ad86bae15\") " pod="openshift-dns/node-resolver-l8kf2" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.518530 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/9d1378c4-1c78-4865-9168-822ad86bae15-hosts-file\") pod \"node-resolver-l8kf2\" (UID: \"9d1378c4-1c78-4865-9168-822ad86bae15\") " pod="openshift-dns/node-resolver-l8kf2" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.524958 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 
11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.525083 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.525186 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.525237 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.525254 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:32Z","lastTransitionTime":"2026-01-30T11:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.586581 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\
\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:32Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.619147 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdj8x\" (UniqueName: \"kubernetes.io/projected/9d1378c4-1c78-4865-9168-822ad86bae15-kube-api-access-hdj8x\") pod \"node-resolver-l8kf2\" (UID: \"9d1378c4-1c78-4865-9168-822ad86bae15\") " pod="openshift-dns/node-resolver-l8kf2" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.619214 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/9d1378c4-1c78-4865-9168-822ad86bae15-hosts-file\") pod \"node-resolver-l8kf2\" (UID: \"9d1378c4-1c78-4865-9168-822ad86bae15\") " pod="openshift-dns/node-resolver-l8kf2" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.619316 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: 
\"kubernetes.io/host-path/9d1378c4-1c78-4865-9168-822ad86bae15-hosts-file\") pod \"node-resolver-l8kf2\" (UID: \"9d1378c4-1c78-4865-9168-822ad86bae15\") " pod="openshift-dns/node-resolver-l8kf2" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.619353 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:32Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.733219 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.733677 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.733780 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.733853 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.733917 4703 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:32Z","lastTransitionTime":"2026-01-30T11:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.762008 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdj8x\" (UniqueName: \"kubernetes.io/projected/9d1378c4-1c78-4865-9168-822ad86bae15-kube-api-access-hdj8x\") pod \"node-resolver-l8kf2\" (UID: \"9d1378c4-1c78-4865-9168-822ad86bae15\") " pod="openshift-dns/node-resolver-l8kf2" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.765143 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:32Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.775050 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-l8kf2" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.780511 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:32Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:32 crc kubenswrapper[4703]: W0130 11:56:32.790608 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d1378c4_1c78_4865_9168_822ad86bae15.slice/crio-0cf0c0d8ce9225fc02a73a67410a2d0b2e84e0cd4509ba85de7aabe6d816c33a WatchSource:0}: Error finding container 0cf0c0d8ce9225fc02a73a67410a2d0b2e84e0cd4509ba85de7aabe6d816c33a: Status 404 returned error can't find the container with id 0cf0c0d8ce9225fc02a73a67410a2d0b2e84e0cd4509ba85de7aabe6d816c33a Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.801395 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:32Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.819297 4703 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-30 11:51:31 +0000 UTC, rotation deadline is 2026-11-22 06:32:22.709942448 +0000 UTC Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.819367 4703 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7098h35m49.890577793s for next certificate rotation Jan 30 11:56:32 crc 
kubenswrapper[4703]: I0130 11:56:32.830710 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:32Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.832878 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.833049 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.833160 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: 
\"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.833226 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:56:40.833166935 +0000 UTC m=+36.610988589 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.833338 4703 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.833268 4703 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.833428 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:40.833397172 +0000 UTC m=+36.611218966 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.833454 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:40.833443283 +0000 UTC m=+36.611265137 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.840644 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.840685 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.840697 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.840718 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.840742 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:32Z","lastTransitionTime":"2026-01-30T11:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.897184 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-72zlj"] Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.897569 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-cx2rm"] Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.897808 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-9gsnx"] Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.897934 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.897959 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-72zlj" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.898937 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.900049 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.900462 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.900714 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.900850 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.900959 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.901113 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.901325 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.901646 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.901825 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.902311 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.902443 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.903302 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.922703 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with 
unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:32Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934089 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-host-run-netns\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934149 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-system-cni-dir\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934166 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-host-var-lib-kubelet\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934201 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: 
\"kubernetes.io/host-path/32108840-3d15-43ae-b3d1-fa5b8eb931c7-cnibin\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934215 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-multus-cni-dir\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934230 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-hostroot\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934247 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ffea6197-b1fb-427b-adc5-bcc1c6108235-proxy-tls\") pod \"machine-config-daemon-cx2rm\" (UID: \"ffea6197-b1fb-427b-adc5-bcc1c6108235\") " pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934262 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-cnibin\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934280 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-multus-daemon-config\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934362 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934384 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/32108840-3d15-43ae-b3d1-fa5b8eb931c7-cni-binary-copy\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934401 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/32108840-3d15-43ae-b3d1-fa5b8eb931c7-tuning-conf-dir\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934419 4703 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w886f\" (UniqueName: \"kubernetes.io/projected/ffea6197-b1fb-427b-adc5-bcc1c6108235-kube-api-access-w886f\") pod \"machine-config-daemon-cx2rm\" (UID: \"ffea6197-b1fb-427b-adc5-bcc1c6108235\") " pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934439 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934460 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5l9xq\" (UniqueName: \"kubernetes.io/projected/32108840-3d15-43ae-b3d1-fa5b8eb931c7-kube-api-access-5l9xq\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934562 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/32108840-3d15-43ae-b3d1-fa5b8eb931c7-system-cni-dir\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934578 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/ffea6197-b1fb-427b-adc5-bcc1c6108235-rootfs\") pod \"machine-config-daemon-cx2rm\" (UID: \"ffea6197-b1fb-427b-adc5-bcc1c6108235\") " pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934620 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-cni-binary-copy\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934647 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-host-var-lib-cni-bin\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934662 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-host-var-lib-cni-multus\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934678 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/32108840-3d15-43ae-b3d1-fa5b8eb931c7-os-release\") pod 
\"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934693 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-etc-kubernetes\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934710 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-multus-socket-dir-parent\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934726 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-multus-conf-dir\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934743 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-host-run-multus-certs\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934763 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ffea6197-b1fb-427b-adc5-bcc1c6108235-mcd-auth-proxy-config\") pod \"machine-config-daemon-cx2rm\" (UID: \"ffea6197-b1fb-427b-adc5-bcc1c6108235\") " pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934787 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/32108840-3d15-43ae-b3d1-fa5b8eb931c7-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934808 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-host-run-k8s-cni-cncf-io\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934824 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52zng\" (UniqueName: \"kubernetes.io/projected/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-kube-api-access-52zng\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.934838 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-os-release\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.935069 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.935125 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.935155 4703 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.935207 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:40.935188938 +0000 UTC m=+36.713010592 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.935698 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.935715 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.935725 4703 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:32 crc kubenswrapper[4703]: E0130 11:56:32.935749 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:40.935740972 +0000 UTC m=+36.713562626 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.938216 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:32Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.945822 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.945857 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.945868 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.945890 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.945907 4703 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:32Z","lastTransitionTime":"2026-01-30T11:56:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.956978 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:32Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:32 crc kubenswrapper[4703]: I0130 11:56:32.987242 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:32Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.006028 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:33Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.028894 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:33Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.055307 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:33Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.075378 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-multus-cni-dir\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.075426 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-hostroot\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.075452 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/32108840-3d15-43ae-b3d1-fa5b8eb931c7-cni-binary-copy\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.075486 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ffea6197-b1fb-427b-adc5-bcc1c6108235-proxy-tls\") pod \"machine-config-daemon-cx2rm\" (UID: \"ffea6197-b1fb-427b-adc5-bcc1c6108235\") " pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.075513 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-cnibin\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.075534 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-multus-daemon-config\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.075775 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/32108840-3d15-43ae-b3d1-fa5b8eb931c7-tuning-conf-dir\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " 
pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.075834 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w886f\" (UniqueName: \"kubernetes.io/projected/ffea6197-b1fb-427b-adc5-bcc1c6108235-kube-api-access-w886f\") pod \"machine-config-daemon-cx2rm\" (UID: \"ffea6197-b1fb-427b-adc5-bcc1c6108235\") " pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.075854 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5l9xq\" (UniqueName: \"kubernetes.io/projected/32108840-3d15-43ae-b3d1-fa5b8eb931c7-kube-api-access-5l9xq\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.075877 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-cni-binary-copy\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.075895 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/32108840-3d15-43ae-b3d1-fa5b8eb931c7-system-cni-dir\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.075913 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/ffea6197-b1fb-427b-adc5-bcc1c6108235-rootfs\") pod \"machine-config-daemon-cx2rm\" (UID: \"ffea6197-b1fb-427b-adc5-bcc1c6108235\") " pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.075931 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-host-var-lib-cni-bin\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.075954 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-host-var-lib-cni-multus\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.075986 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-etc-kubernetes\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076013 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/32108840-3d15-43ae-b3d1-fa5b8eb931c7-os-release\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " 
pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076045 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ffea6197-b1fb-427b-adc5-bcc1c6108235-mcd-auth-proxy-config\") pod \"machine-config-daemon-cx2rm\" (UID: \"ffea6197-b1fb-427b-adc5-bcc1c6108235\") " pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076069 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-multus-socket-dir-parent\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076093 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-multus-conf-dir\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076156 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-host-run-multus-certs\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076184 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-host-run-k8s-cni-cncf-io\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076213 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52zng\" (UniqueName: \"kubernetes.io/projected/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-kube-api-access-52zng\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076229 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-multus-daemon-config\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076235 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/32108840-3d15-43ae-b3d1-fa5b8eb931c7-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076285 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-os-release\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc 
kubenswrapper[4703]: I0130 11:56:33.076304 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-host-run-netns\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076336 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/32108840-3d15-43ae-b3d1-fa5b8eb931c7-cnibin\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076351 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-system-cni-dir\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076369 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-host-var-lib-kubelet\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076401 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/32108840-3d15-43ae-b3d1-fa5b8eb931c7-cni-binary-copy\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076418 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-host-var-lib-kubelet\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076470 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-cnibin\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076472 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-os-release\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076490 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-host-run-netns\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076514 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/32108840-3d15-43ae-b3d1-fa5b8eb931c7-cnibin\") pod 
\"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076535 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-multus-cni-dir\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076562 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-hostroot\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076587 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-host-var-lib-cni-multus\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076653 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-system-cni-dir\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076720 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-multus-socket-dir-parent\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076766 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-etc-kubernetes\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076808 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/32108840-3d15-43ae-b3d1-fa5b8eb931c7-os-release\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076962 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/32108840-3d15-43ae-b3d1-fa5b8eb931c7-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.076986 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-host-run-multus-certs\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.077029 
4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-multus-conf-dir\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.077034 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-host-run-k8s-cni-cncf-io\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.077075 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/ffea6197-b1fb-427b-adc5-bcc1c6108235-rootfs\") pod \"machine-config-daemon-cx2rm\" (UID: \"ffea6197-b1fb-427b-adc5-bcc1c6108235\") " pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.077105 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/32108840-3d15-43ae-b3d1-fa5b8eb931c7-system-cni-dir\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.077166 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/32108840-3d15-43ae-b3d1-fa5b8eb931c7-tuning-conf-dir\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.078668 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.078728 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.078742 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.078761 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.078772 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:33Z","lastTransitionTime":"2026-01-30T11:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.082042 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:33Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.086332 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.086397 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:33 crc kubenswrapper[4703]: E0130 11:56:33.086493 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:56:33 crc kubenswrapper[4703]: E0130 11:56:33.086648 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.077161 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-host-var-lib-cni-bin\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.088490 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ffea6197-b1fb-427b-adc5-bcc1c6108235-mcd-auth-proxy-config\") pod \"machine-config-daemon-cx2rm\" (UID: \"ffea6197-b1fb-427b-adc5-bcc1c6108235\") " pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.088766 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-cni-binary-copy\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.099825 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ffea6197-b1fb-427b-adc5-bcc1c6108235-proxy-tls\") pod \"machine-config-daemon-cx2rm\" (UID: \"ffea6197-b1fb-427b-adc5-bcc1c6108235\") " pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.108803 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w886f\" (UniqueName: \"kubernetes.io/projected/ffea6197-b1fb-427b-adc5-bcc1c6108235-kube-api-access-w886f\") pod \"machine-config-daemon-cx2rm\" (UID: \"ffea6197-b1fb-427b-adc5-bcc1c6108235\") " pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.108859 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52zng\" (UniqueName: \"kubernetes.io/projected/874a5df5-f6bd-4111-aefa-f43e43e1fcc0-kube-api-access-52zng\") pod \"multus-72zlj\" (UID: \"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\") " pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.109428 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5l9xq\" (UniqueName: \"kubernetes.io/projected/32108840-3d15-43ae-b3d1-fa5b8eb931c7-kube-api-access-5l9xq\") pod \"multus-additional-cni-plugins-9gsnx\" (UID: \"32108840-3d15-43ae-b3d1-fa5b8eb931c7\") " pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.191233 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 
12:24:20.641848142 +0000 UTC Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.192871 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.192905 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.192916 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.192936 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.192945 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:33Z","lastTransitionTime":"2026-01-30T11:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.194878 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:33Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.220132 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.235757 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-72zlj" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.245363 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" Jan 30 11:56:33 crc kubenswrapper[4703]: W0130 11:56:33.272001 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod874a5df5_f6bd_4111_aefa_f43e43e1fcc0.slice/crio-c71d28493553343f8064c79117c604efc42fccf5bebb886bf57bece80d5648ac WatchSource:0}: Error finding container c71d28493553343f8064c79117c604efc42fccf5bebb886bf57bece80d5648ac: Status 404 returned error can't find the container with id c71d28493553343f8064c79117c604efc42fccf5bebb886bf57bece80d5648ac Jan 30 11:56:33 crc kubenswrapper[4703]: W0130 11:56:33.287026 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod32108840_3d15_43ae_b3d1_fa5b8eb931c7.slice/crio-68d6ddbbd474b1b007afb4ad02f9cc591d5a5e520bf0df326a3f7a70a06dbd2d WatchSource:0}: Error finding container 68d6ddbbd474b1b007afb4ad02f9cc591d5a5e520bf0df326a3f7a70a06dbd2d: Status 404 returned error can't find the container with id 68d6ddbbd474b1b007afb4ad02f9cc591d5a5e520bf0df326a3f7a70a06dbd2d Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.327872 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.327902 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.327913 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.327928 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.327937 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:33Z","lastTransitionTime":"2026-01-30T11:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.332299 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:33Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.335656 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-72zlj" event={"ID":"874a5df5-f6bd-4111-aefa-f43e43e1fcc0","Type":"ContainerStarted","Data":"c71d28493553343f8064c79117c604efc42fccf5bebb886bf57bece80d5648ac"} Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.372612 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerStarted","Data":"9a227f53d86287b7254d7bda28f8393cd4ee97d3c088dfea2483b47e94419965"} Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.374379 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" event={"ID":"32108840-3d15-43ae-b3d1-fa5b8eb931c7","Type":"ContainerStarted","Data":"68d6ddbbd474b1b007afb4ad02f9cc591d5a5e520bf0df326a3f7a70a06dbd2d"} Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.378631 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-l8kf2" 
event={"ID":"9d1378c4-1c78-4865-9168-822ad86bae15","Type":"ContainerStarted","Data":"bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3"} Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.378661 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-l8kf2" event={"ID":"9d1378c4-1c78-4865-9168-822ad86bae15","Type":"ContainerStarted","Data":"0cf0c0d8ce9225fc02a73a67410a2d0b2e84e0cd4509ba85de7aabe6d816c33a"} Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.408327 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-n7wnf"] Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.410694 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.414061 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.414183 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.422724 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/06ffa267-20b9-4132-9f87-1218b111ebbc-env-overrides\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.422930 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-kubelet\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.424626 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-cni-bin\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.424771 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-run-ovn-kubernetes\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.424865 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-run-netns\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.424955 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-run-openvswitch\") pod \"ovnkube-node-n7wnf\" (UID: 
\"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.423468 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.425069 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.425445 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-node-log\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.425542 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/06ffa267-20b9-4132-9f87-1218b111ebbc-ovnkube-script-lib\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.425642 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-systemd-units\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.425732 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/06ffa267-20b9-4132-9f87-1218b111ebbc-ovn-node-metrics-cert\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.425820 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-var-lib-openvswitch\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.425962 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/06ffa267-20b9-4132-9f87-1218b111ebbc-ovnkube-config\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.424419 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.426060 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: 
\"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-cni-netd\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.426198 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-slash\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.426246 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-run-systemd\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.424528 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.426273 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-run-ovn\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.426294 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-etc-openvswitch\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.426312 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbx97\" (UniqueName: \"kubernetes.io/projected/06ffa267-20b9-4132-9f87-1218b111ebbc-kube-api-access-dbx97\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.426336 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-log-socket\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.424552 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.425729 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.430670 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.430693 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 
11:56:33.430700 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.430713 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.430723 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:33Z","lastTransitionTime":"2026-01-30T11:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.537609 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:33Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.538170 4703 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/06ffa267-20b9-4132-9f87-1218b111ebbc-ovn-node-metrics-cert\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.539166 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-var-lib-openvswitch\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.539347 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/06ffa267-20b9-4132-9f87-1218b111ebbc-ovnkube-config\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.539516 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-cni-netd\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.539638 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-slash\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.539745 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-run-systemd\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.539843 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-run-ovn\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.539958 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-etc-openvswitch\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.540049 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbx97\" (UniqueName: \"kubernetes.io/projected/06ffa267-20b9-4132-9f87-1218b111ebbc-kube-api-access-dbx97\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.540161 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: 
\"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-log-socket\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.540269 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-kubelet\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.540407 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/06ffa267-20b9-4132-9f87-1218b111ebbc-env-overrides\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.540589 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-cni-bin\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.540689 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-run-ovn-kubernetes\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.540795 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-run-openvswitch\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.540886 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.541069 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-etc-openvswitch\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.539256 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-var-lib-openvswitch\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.541572 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/06ffa267-20b9-4132-9f87-1218b111ebbc-ovn-node-metrics-cert\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.542178 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-log-socket\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.542213 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-run-ovn\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.542349 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-run-systemd\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.542442 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-cni-netd\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.542515 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-kubelet\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.542500 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-cni-bin\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.542483 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-slash\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.542559 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-run-ovn-kubernetes\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.542563 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/06ffa267-20b9-4132-9f87-1218b111ebbc-env-overrides\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.542584 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-run-openvswitch\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.542624 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-run-netns\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.542654 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/06ffa267-20b9-4132-9f87-1218b111ebbc-ovnkube-script-lib\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.542695 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-run-netns\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.542927 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.543096 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-node-log\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.543233 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-node-log\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.543284 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/06ffa267-20b9-4132-9f87-1218b111ebbc-ovnkube-script-lib\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.543311 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-systemd-units\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: 
I0130 11:56:33.543344 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-systemd-units\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.543740 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/06ffa267-20b9-4132-9f87-1218b111ebbc-ovnkube-config\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.549410 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.549447 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.549457 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.549472 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.549482 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:33Z","lastTransitionTime":"2026-01-30T11:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.633781 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbx97\" (UniqueName: \"kubernetes.io/projected/06ffa267-20b9-4132-9f87-1218b111ebbc-kube-api-access-dbx97\") pod \"ovnkube-node-n7wnf\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.641057 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:33Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.654619 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.654665 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.654688 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.654715 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.654731 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:33Z","lastTransitionTime":"2026-01-30T11:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.698364 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:33Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.720336 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:33Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.793561 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.793641 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:33Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.803853 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.803901 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.803914 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.803932 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.803943 4703 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:33Z","lastTransitionTime":"2026-01-30T11:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.894762 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:33Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.913799 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:33Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.927712 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.927757 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.927772 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.927794 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.927849 4703 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:33Z","lastTransitionTime":"2026-01-30T11:56:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:33 crc kubenswrapper[4703]: I0130 11:56:33.928348 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:33Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.112785 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:34 crc kubenswrapper[4703]: E0130 11:56:34.113029 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.113979 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:34 crc kubenswrapper[4703]: E0130 11:56:34.114240 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.114701 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.114748 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.114763 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.114781 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.114793 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:34Z","lastTransitionTime":"2026-01-30T11:56:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.138641 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.171720 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.192751 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 14:45:43.21388897 +0000 UTC Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.193308 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.217231 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.217278 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.217295 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.217328 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.217345 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:34Z","lastTransitionTime":"2026-01-30T11:56:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.220159 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.241986 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plu
gin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.264497 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\
":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.1
68.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.322732 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.322764 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.322774 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.322792 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.322802 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:34Z","lastTransitionTime":"2026-01-30T11:56:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.381486 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" event={"ID":"32108840-3d15-43ae-b3d1-fa5b8eb931c7","Type":"ContainerStarted","Data":"08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be"} Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.382884 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerStarted","Data":"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835"} Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.382928 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerStarted","Data":"08182a420baba462c4c12a1921e38e6566f5f755c5a8757e5b5f436b449ffa38"} Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.383822 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-72zlj" event={"ID":"874a5df5-f6bd-4111-aefa-f43e43e1fcc0","Type":"ContainerStarted","Data":"f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890"} Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.385432 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerStarted","Data":"2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883"} Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.385491 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerStarted","Data":"58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed"} Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.398533 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.411609 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.424690 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.437307 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.437374 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.437386 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.437413 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.437426 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:34Z","lastTransitionTime":"2026-01-30T11:56:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.438042 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.454347 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disable
d\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.472516 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.485961 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.497884 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.510869 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.524184 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.539797 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.539842 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.539852 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.539871 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.539883 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:34Z","lastTransitionTime":"2026-01-30T11:56:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.541445 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.553211 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.575770 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.588106 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:34Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.642718 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.642754 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.642762 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.642779 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.642791 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:34Z","lastTransitionTime":"2026-01-30T11:56:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.746409 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.746691 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.746704 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.746747 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.746761 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:34Z","lastTransitionTime":"2026-01-30T11:56:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.850427 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.850481 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.850494 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.850519 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.850533 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:34Z","lastTransitionTime":"2026-01-30T11:56:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.953512 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.953565 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.953587 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.953605 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.953619 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:34Z","lastTransitionTime":"2026-01-30T11:56:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:34 crc kubenswrapper[4703]: I0130 11:56:34.960767 4703 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961041 4703 reflector.go:484] object-"openshift-network-node-identity"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961074 4703 reflector.go:484] object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961061 4703 reflector.go:484] object-"openshift-network-node-identity"/"network-node-identity-cert": watch of *v1.Secret ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961091 4703 reflector.go:484] object-"openshift-dns"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961160 4703 reflector.go:484] object-"openshift-network-node-identity"/"ovnkube-identity-cm": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961202 4703 reflector.go:484] object-"openshift-multus"/"cni-copy-resources": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961208 4703 reflector.go:484] object-"openshift-machine-config-operator"/"proxy-tls": watch of *v1.Secret ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961053 4703 reflector.go:484] object-"openshift-dns"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961227 4703 reflector.go:484] object-"openshift-ovn-kubernetes"/"ovnkube-config": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961207 4703 reflector.go:484] object-"openshift-ovn-kubernetes"/"ovnkube-script-lib": 
watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961083 4703 reflector.go:484] object-"openshift-machine-config-operator"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961238 4703 reflector.go:484] object-"openshift-multus"/"default-dockercfg-2q5b6": watch of *v1.Secret ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961263 4703 reflector.go:484] object-"openshift-multus"/"multus-daemon-config": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961252 4703 reflector.go:484] object-"openshift-machine-config-operator"/"kube-rbac-proxy": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961274 4703 reflector.go:484] object-"openshift-multus"/"default-cni-sysctl-allowlist": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961291 4703 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.CSIDriver ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961186 4703 reflector.go:484] object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl": watch of *v1.Secret ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961309 4703 reflector.go:484] object-"openshift-multus"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961291 4703 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.RuntimeClass ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961138 4703 reflector.go:484] object-"openshift-ovn-kubernetes"/"env-overrides": watch of *v1.ConfigMap 
ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding
Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961305 4703 reflector.go:484] object-"openshift-network-node-identity"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding
Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961143 4703 reflector.go:484] object-"openshift-network-operator"/"metrics-tls": watch of *v1.Secret ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding
Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961170 4703 reflector.go:484] object-"openshift-multus"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding
Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961042 4703 reflector.go:484] object-"openshift-network-operator"/"iptables-alerter-script": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding
Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961165 4703 reflector.go:484] pkg/kubelet/config/apiserver.go:66: watch of *v1.Pod ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding
Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961588 4703 reflector.go:484] object-"openshift-network-node-identity"/"env-overrides": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding
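
The transport.go:147 entry above marks a kubelet client-certificate rotation: the old connections are force-closed so the new credentials take effect, and every reflector's watch stream dies at once with the same "client connection force closed via ClientConn.Close" error, after which each one re-lists and re-watches. A simplified sketch of that watch/re-watch loop (not the actual client-go reflector implementation; the namespace and resource are taken from one of the warnings above, and in-cluster configuration is an assumption):

    // rewatch.go — illustrates why the burst of reflector.go:484 warnings is
    // benign: a closed watch is simply re-established on the next iteration.
    package main

    import (
        "context"
        "fmt"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
    )

    func main() {
        cfg, err := rest.InClusterConfig()
        if err != nil {
            panic(err)
        }
        client, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        ctx := context.Background()
        for {
            // (Re)establish the watch; after rotation closes the transport,
            // this call is what brings the stream back with the new certificate.
            w, err := client.CoreV1().ConfigMaps("openshift-network-node-identity").
                Watch(ctx, metav1.ListOptions{})
            if err != nil {
                fmt.Println("watch failed, retrying:", err)
                time.Sleep(time.Second)
                continue
            }
            for ev := range w.ResultChan() {
                fmt.Println("event:", ev.Type)
            }
            // ResultChan closed: the watch ended, as in the warnings above;
            // loop around and start a new one.
        }
    }
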
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Patch \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-node-identity/pods/network-node-identity-vrzqb/status\": http2: client connection force closed via ClientConn.Close" Jan 30 11:56:34 crc kubenswrapper[4703]: E0130 11:56:34.961622 4703 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": write tcp 38.129.56.250:49110->38.129.56.250:6443: use of closed network connection" Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961187 4703 reflector.go:484] object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert": watch of *v1.Secret ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection 
force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961191 4703 reflector.go:484] object-"openshift-network-operator"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961206 4703 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.Node ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961230 4703 reflector.go:484] k8s.io/client-go/informers/factory.go:160: watch of *v1.Service ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961248 4703 reflector.go:484] object-"openshift-machine-config-operator"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961251 4703 reflector.go:484] object-"openshift-ovn-kubernetes"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961085 4703 reflector.go:484] object-"openshift-dns"/"node-resolver-dockercfg-kz9s7": watch of *v1.Secret ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961274 4703 reflector.go:484] object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq": watch of *v1.Secret ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961280 4703 reflector.go:484] object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz": watch of *v1.Secret ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:34 crc kubenswrapper[4703]: W0130 11:56:34.961332 4703 reflector.go:484] object-"openshift-network-operator"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: an error on the server ("unable to decode an event from the watch stream: http2: client connection force closed via ClientConn.Close") has prevented the request from succeeding Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.051403 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.068896 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.068933 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.068941 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.068956 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.068968 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:35Z","lastTransitionTime":"2026-01-30T11:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.086307 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:35 crc kubenswrapper[4703]: E0130 11:56:35.086497 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.125168 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootf
s\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.140284 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z"
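
The kube-apiserver-crc entry above carries the whole crash diagnosis inline: the kube-apiserver-check-endpoints container is in CrashLoopBackOff, and its lastState.terminated block records exit code 255 together with the container's final log lines, ending in a fatal pods "kube-apiserver-crc" not found. A small sketch of pulling those fields out of a corev1.Pod with the standard API types; the helper name is invented for illustration:

    // Package example: a helper for reading the diagnostics buried in pod
    // status payloads like the one above; illustrative only.
    package example

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    // explainRestarts prints, for each container, the waiting reason (e.g.
    // CrashLoopBackOff) and the exit code and message recorded in
    // lastState.terminated — for kube-apiserver-check-endpoints that would
    // show exit code 255 and the "pods not found" fatal line.
    func explainRestarts(pod *corev1.Pod) {
        for _, cs := range pod.Status.ContainerStatuses {
            if cs.State.Waiting != nil {
                fmt.Printf("%s: waiting (%s)\n", cs.Name, cs.State.Waiting.Reason)
            }
            if t := cs.LastTerminationState.Terminated; t != nil {
                fmt.Printf("%s: last exit %d: %s\n", cs.Name, t.ExitCode, t.Message)
            }
        }
    }
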
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.169216 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.170904 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.170970 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.170988 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.171029 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.171043 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:35Z","lastTransitionTime":"2026-01-30T11:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.184106 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.193621 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 15:08:52.834974941 +0000 UTC Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.200171 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":
\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.220602 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.232514 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.242978 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.257039 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.273534 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.273569 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.273577 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.273591 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.273600 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:35Z","lastTransitionTime":"2026-01-30T11:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.274429 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.287671 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.302312 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.322339 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.337391 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.356440 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.369892 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.375655 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.375685 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.375694 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.375707 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.375716 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:35Z","lastTransitionTime":"2026-01-30T11:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.380677 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.389412 4703 generic.go:334] "Generic (PLEG): container finished" podID="32108840-3d15-43ae-b3d1-fa5b8eb931c7" containerID="08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be" exitCode=0 Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.389512 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" event={"ID":"32108840-3d15-43ae-b3d1-fa5b8eb931c7","Type":"ContainerDied","Data":"08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be"} Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.391520 4703 generic.go:334] "Generic (PLEG): container finished" podID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerID="e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835" exitCode=0 Jan 30 11:56:35 crc 
kubenswrapper[4703]: I0130 11:56:35.391558 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerDied","Data":"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835"} Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.402065 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\
"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.419617 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.438111 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.451764 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.463033 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.477675 4703 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.477799 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.478193 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.478205 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.478222 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.478234 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:35Z","lastTransitionTime":"2026-01-30T11:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.487634 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.500480 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.512981 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 
2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.524428 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.544547 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.556570 4703 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.585667 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.585709 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.585722 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.585764 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.585796 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:35Z","lastTransitionTime":"2026-01-30T11:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.585956 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.726777 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.731205 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.731239 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.731247 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.731261 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.731270 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:35Z","lastTransitionTime":"2026-01-30T11:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.782979 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.806905 4703 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.819852 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.832924 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.832974 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.833421 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.833469 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.833509 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.833525 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
node="crc" event="NodeHasSufficientPID" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.833549 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.833565 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:35Z","lastTransitionTime":"2026-01-30T11:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.840558 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\
"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.842340 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.862686 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.862686 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:35Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.883901 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.890986 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.909971 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.936388 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.936431 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.936441 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.936460 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.936474 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:35Z","lastTransitionTime":"2026-01-30T11:56:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.965385 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.966104 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.973502 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Jan 30 11:56:35 crc kubenswrapper[4703]: I0130 11:56:35.995579 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.025867 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.039580 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.039612 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.039622 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.039637 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.039665 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:36Z","lastTransitionTime":"2026-01-30T11:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.041226 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.058443 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.085343 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.085815 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:56:36 crc kubenswrapper[4703]: E0130 11:56:36.085928 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:56:36 crc kubenswrapper[4703]: E0130 11:56:36.086175 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.086678 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.143879 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.143913 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.143926 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.143943 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.143955 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:36Z","lastTransitionTime":"2026-01-30T11:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.149284 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.160241 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.173851 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-vzhfb"]
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.174191 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-vzhfb"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.176654 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.176879 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.177238 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.177887 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.189746 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.191268 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.194255 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 11:12:03.143740051 +0000 UTC
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.211435 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/d5b3826b-50d0-4b94-bc51-42e5945b80ed-serviceca\") pod \"node-ca-vzhfb\" (UID: \"d5b3826b-50d0-4b94-bc51-42e5945b80ed\") " pod="openshift-image-registry/node-ca-vzhfb" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.211520 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d5b3826b-50d0-4b94-bc51-42e5945b80ed-host\") pod \"node-ca-vzhfb\" (UID: \"d5b3826b-50d0-4b94-bc51-42e5945b80ed\") " pod="openshift-image-registry/node-ca-vzhfb" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.211545 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fxpq\" (UniqueName: \"kubernetes.io/projected/d5b3826b-50d0-4b94-bc51-42e5945b80ed-kube-api-access-4fxpq\") pod \"node-ca-vzhfb\" (UID: \"d5b3826b-50d0-4b94-bc51-42e5945b80ed\") " pod="openshift-image-registry/node-ca-vzhfb" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.217666 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.225227 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.229743 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.238984 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.245906 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.245943 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.245954 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.245972 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.245984 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:36Z","lastTransitionTime":"2026-01-30T11:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.247257 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets
/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.265852 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z 
is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.282910 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.294505 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.301149 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.310354 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.312612 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fxpq\" (UniqueName: \"kubernetes.io/projected/d5b3826b-50d0-4b94-bc51-42e5945b80ed-kube-api-access-4fxpq\") pod \"node-ca-vzhfb\" (UID: \"d5b3826b-50d0-4b94-bc51-42e5945b80ed\") " pod="openshift-image-registry/node-ca-vzhfb" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.312660 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/d5b3826b-50d0-4b94-bc51-42e5945b80ed-serviceca\") pod \"node-ca-vzhfb\" (UID: \"d5b3826b-50d0-4b94-bc51-42e5945b80ed\") " pod="openshift-image-registry/node-ca-vzhfb" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.312700 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d5b3826b-50d0-4b94-bc51-42e5945b80ed-host\") pod \"node-ca-vzhfb\" (UID: \"d5b3826b-50d0-4b94-bc51-42e5945b80ed\") " pod="openshift-image-registry/node-ca-vzhfb" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.312750 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d5b3826b-50d0-4b94-bc51-42e5945b80ed-host\") pod \"node-ca-vzhfb\" (UID: \"d5b3826b-50d0-4b94-bc51-42e5945b80ed\") " pod="openshift-image-registry/node-ca-vzhfb" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.313793 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/d5b3826b-50d0-4b94-bc51-42e5945b80ed-serviceca\") pod \"node-ca-vzhfb\" (UID: \"d5b3826b-50d0-4b94-bc51-42e5945b80ed\") " pod="openshift-image-registry/node-ca-vzhfb" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.320784 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.329598 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fxpq\" (UniqueName: \"kubernetes.io/projected/d5b3826b-50d0-4b94-bc51-42e5945b80ed-kube-api-access-4fxpq\") pod \"node-ca-vzhfb\" (UID: \"d5b3826b-50d0-4b94-bc51-42e5945b80ed\") " pod="openshift-image-registry/node-ca-vzhfb" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.335364 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.344212 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.346734 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.348273 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.348323 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.348336 4703 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.348355 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.348369 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:36Z","lastTransitionTime":"2026-01-30T11:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.357286 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.369876 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.405278 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerStarted","Data":"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0"} Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.405319 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerStarted","Data":"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4"} Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.405330 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerStarted","Data":"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37"} Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.405339 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerStarted","Data":"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2"} Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.405348 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerStarted","Data":"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e"} Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.405359 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerStarted","Data":"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c"} Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.406733 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" event={"ID":"32108840-3d15-43ae-b3d1-fa5b8eb931c7","Type":"ContainerStarted","Data":"6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d"} Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.412983 4703 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.428950 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\
\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"nam
e\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://
e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.434864 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.443196 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.445563 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.450609 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.450647 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.450658 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.450677 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.450689 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:36Z","lastTransitionTime":"2026-01-30T11:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.453702 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.461195 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.464512 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.466094 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.469316 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.478474 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.484112 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.488268 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.491063 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-vzhfb" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.493898 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: W0130 11:56:36.504925 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd5b3826b_50d0_4b94_bc51_42e5945b80ed.slice/crio-1ca6afa1bec5e397ad70deae77e0e2e461d6ac9861d2d81965afdb1b04b63d1b WatchSource:0}: Error finding container 1ca6afa1bec5e397ad70deae77e0e2e461d6ac9861d2d81965afdb1b04b63d1b: Status 404 returned error can't find the container with id 1ca6afa1bec5e397ad70deae77e0e2e461d6ac9861d2d81965afdb1b04b63d1b Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.507858 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.520000 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.531966 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.548310 4703 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f9307627324
9eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.552883 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.552919 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.552932 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.552950 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.552961 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:36Z","lastTransitionTime":"2026-01-30T11:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.562554 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.572840 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.586428 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.603075 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.632551 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha2
56:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:36Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.655311 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.655350 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.655359 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.655377 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.655387 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:36Z","lastTransitionTime":"2026-01-30T11:56:36Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.757382 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.757423 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.757432 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.757448 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.757458 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:36Z","lastTransitionTime":"2026-01-30T11:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.859721 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.859750 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.859761 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.859776 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.859788 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:36Z","lastTransitionTime":"2026-01-30T11:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.961839 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.961887 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.961897 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.961913 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:36 crc kubenswrapper[4703]: I0130 11:56:36.961923 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:36Z","lastTransitionTime":"2026-01-30T11:56:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.114570 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:37 crc kubenswrapper[4703]: E0130 11:56:37.114974 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.117209 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.117250 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.117263 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.117281 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.117293 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:37Z","lastTransitionTime":"2026-01-30T11:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.194443 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 15:14:11.299335357 +0000 UTC Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.219935 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.219989 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.219999 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.220018 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.220029 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:37Z","lastTransitionTime":"2026-01-30T11:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.327196 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.327223 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.327232 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.327247 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.327257 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:37Z","lastTransitionTime":"2026-01-30T11:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.410444 4703 generic.go:334] "Generic (PLEG): container finished" podID="32108840-3d15-43ae-b3d1-fa5b8eb931c7" containerID="6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d" exitCode=0 Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.410511 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" event={"ID":"32108840-3d15-43ae-b3d1-fa5b8eb931c7","Type":"ContainerDied","Data":"6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d"} Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.418901 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-vzhfb" event={"ID":"d5b3826b-50d0-4b94-bc51-42e5945b80ed","Type":"ContainerStarted","Data":"c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075"} Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.418936 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-vzhfb" event={"ID":"d5b3826b-50d0-4b94-bc51-42e5945b80ed","Type":"ContainerStarted","Data":"1ca6afa1bec5e397ad70deae77e0e2e461d6ac9861d2d81965afdb1b04b63d1b"} Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.429058 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.429101 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.429114 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.429152 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.429165 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:37Z","lastTransitionTime":"2026-01-30T11:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.457652 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.472834 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.494765 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.505706 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.521956 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.531719 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.531748 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.531757 4703 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.531769 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.531777 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:37Z","lastTransitionTime":"2026-01-30T11:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.544235 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.559824 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.577354 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 
30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.593463 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.607794 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.634757 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.634796 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.634805 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.634822 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.634833 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:37Z","lastTransitionTime":"2026-01-30T11:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.635925 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\
\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.652316 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.667011 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.689889 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z 
is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.707352 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/ser
viceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\
"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.728216 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.737482 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.737565 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.737575 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.737598 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.737608 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:37Z","lastTransitionTime":"2026-01-30T11:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.748010 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.758536 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.771864 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.789466 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z 
is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.801710 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.809152 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.819317 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.828670 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.843183 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.843223 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.843233 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.843249 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.843264 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:37Z","lastTransitionTime":"2026-01-30T11:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.846821 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.863479 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.877433 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.889416 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:37Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.946014 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.946061 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.946076 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.946101 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:37 crc kubenswrapper[4703]: I0130 11:56:37.946138 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:37Z","lastTransitionTime":"2026-01-30T11:56:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.048668 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.048709 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.048717 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.048733 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.048742 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:38Z","lastTransitionTime":"2026-01-30T11:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.085746 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.085803 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:38 crc kubenswrapper[4703]: E0130 11:56:38.085900 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:56:38 crc kubenswrapper[4703]: E0130 11:56:38.086274 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.151477 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.151514 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.151525 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.151541 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.151551 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:38Z","lastTransitionTime":"2026-01-30T11:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.194783 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 16:43:07.093000355 +0000 UTC Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.253833 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.253875 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.253886 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.253905 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.253918 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:38Z","lastTransitionTime":"2026-01-30T11:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.357389 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.357434 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.357445 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.357463 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.357475 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:38Z","lastTransitionTime":"2026-01-30T11:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.460385 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.460432 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.460448 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.460472 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.460488 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:38Z","lastTransitionTime":"2026-01-30T11:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.470400 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" event={"ID":"32108840-3d15-43ae-b3d1-fa5b8eb931c7","Type":"ContainerStarted","Data":"f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4"} Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.473107 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985"} Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.563189 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.563241 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.563257 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.563276 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.563290 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:38Z","lastTransitionTime":"2026-01-30T11:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.593366 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.617316 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.633372 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.652690 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":
\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure 
cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.666040 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.666078 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.666088 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:38 crc 
kubenswrapper[4703]: I0130 11:56:38.666135 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.666146 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:38Z","lastTransitionTime":"2026-01-30T11:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.666881 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.691410 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with 
unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"e
nv-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\
\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.706878 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.720739 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.737898 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.748319 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.761324 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.768160 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.768205 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.768216 4703 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.768234 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.768245 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:38Z","lastTransitionTime":"2026-01-30T11:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.775934 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.788038 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.804739 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 
30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.823836 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"r
eason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Dis
abled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.837419 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disable
d\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.853722 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.866895 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.871415 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.871453 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.871464 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.871480 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.871493 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:38Z","lastTransitionTime":"2026-01-30T11:56:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:38 crc kubenswrapper[4703]: I0130 11:56:38.882787 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":
\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.006560 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.006624 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.006635 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.006649 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.006662 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:39Z","lastTransitionTime":"2026-01-30T11:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.064802 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.075910 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.085457 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:39 crc kubenswrapper[4703]: E0130 11:56:39.085658 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.089060 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.100543 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.111905 4703 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.121804 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.131522 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.141811 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.153159 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.195536 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 23:31:16.344248546 +0000 UTC Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.242071 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.242338 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.242481 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.242619 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.242859 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:39Z","lastTransitionTime":"2026-01-30T11:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.345688 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.345989 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.346072 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.346182 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.346277 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:39Z","lastTransitionTime":"2026-01-30T11:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.449257 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.449313 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.449343 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.449376 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.449415 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:39Z","lastTransitionTime":"2026-01-30T11:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.481002 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerStarted","Data":"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8"} Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.483277 4703 generic.go:334] "Generic (PLEG): container finished" podID="32108840-3d15-43ae-b3d1-fa5b8eb931c7" containerID="f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4" exitCode=0 Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.483844 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" event={"ID":"32108840-3d15-43ae-b3d1-fa5b8eb931c7","Type":"ContainerDied","Data":"f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4"} Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.507261 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd 
nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":
\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cn
i-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.519662 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.534317 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.545810 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.553418 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.553470 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.553480 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:39 crc 
kubenswrapper[4703]: I0130 11:56:39.553496 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.553506 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:39Z","lastTransitionTime":"2026-01-30T11:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.559704 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.572526 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.588162 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.601951 4703 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.614506 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.627154 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.640465 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",
\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.651909 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.655455 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.655494 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.655506 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.655523 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.655533 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:39Z","lastTransitionTime":"2026-01-30T11:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.665809 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.675991 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:39Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.758382 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.758419 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.758432 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.758450 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.758463 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:39Z","lastTransitionTime":"2026-01-30T11:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.861542 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.861640 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.861653 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.861714 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.861735 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:39Z","lastTransitionTime":"2026-01-30T11:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.964641 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.964699 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.964710 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.964733 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:39 crc kubenswrapper[4703]: I0130 11:56:39.964749 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:39Z","lastTransitionTime":"2026-01-30T11:56:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.066922 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.066962 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.066977 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.066994 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.067006 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:40Z","lastTransitionTime":"2026-01-30T11:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.085624 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.085624 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:40 crc kubenswrapper[4703]: E0130 11:56:40.085833 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:56:40 crc kubenswrapper[4703]: E0130 11:56:40.086421 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.170617 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.170672 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.170685 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.170710 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.170723 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:40Z","lastTransitionTime":"2026-01-30T11:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.196536 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 17:23:25.599192896 +0000 UTC Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.272633 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.272673 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.272684 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.272700 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.272711 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:40Z","lastTransitionTime":"2026-01-30T11:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.375088 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.375146 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.375155 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.375170 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.375179 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:40Z","lastTransitionTime":"2026-01-30T11:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.477650 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.477691 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.477699 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.477718 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.477728 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:40Z","lastTransitionTime":"2026-01-30T11:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.488285 4703 generic.go:334] "Generic (PLEG): container finished" podID="32108840-3d15-43ae-b3d1-fa5b8eb931c7" containerID="0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda" exitCode=0 Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.488327 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" event={"ID":"32108840-3d15-43ae-b3d1-fa5b8eb931c7","Type":"ContainerDied","Data":"0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda"} Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.500035 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.517640 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.531073 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.543084 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.558250 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.576902 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.579938 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.579973 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.579983 4703 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.580000 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.580011 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:40Z","lastTransitionTime":"2026-01-30T11:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.591700 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.603795 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.618018 4703 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.632372 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.644661 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.658421 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.673702 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.682353 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.682384 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.682393 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.682412 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.682422 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:40Z","lastTransitionTime":"2026-01-30T11:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.692838 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba6
3902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.785355 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.785938 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.785981 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.786043 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.786080 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:40Z","lastTransitionTime":"2026-01-30T11:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.848417 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:56:40 crc kubenswrapper[4703]: E0130 11:56:40.848623 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:56:56.84858493 +0000 UTC m=+52.626406594 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.848717 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.848784 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:40 crc kubenswrapper[4703]: E0130 11:56:40.849041 4703 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 11:56:40 crc kubenswrapper[4703]: E0130 11:56:40.849108 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:56.849095005 +0000 UTC m=+52.626916809 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 11:56:40 crc kubenswrapper[4703]: E0130 11:56:40.849115 4703 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 11:56:40 crc kubenswrapper[4703]: E0130 11:56:40.849241 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:56.849219158 +0000 UTC m=+52.627040812 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.890416 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.890464 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.890475 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.890493 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.890503 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:40Z","lastTransitionTime":"2026-01-30T11:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.949925 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.950012 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:40 crc kubenswrapper[4703]: E0130 11:56:40.950140 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 11:56:40 crc kubenswrapper[4703]: E0130 11:56:40.950146 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 11:56:40 crc kubenswrapper[4703]: E0130 11:56:40.950189 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 11:56:40 crc kubenswrapper[4703]: E0130 11:56:40.950155 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 11:56:40 crc kubenswrapper[4703]: E0130 11:56:40.950205 4703 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:40 crc kubenswrapper[4703]: E0130 11:56:40.950213 4703 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:40 crc kubenswrapper[4703]: E0130 11:56:40.950261 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:56.950249093 +0000 UTC m=+52.728070747 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:40 crc kubenswrapper[4703]: E0130 11:56:40.950276 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-30 11:56:56.950270114 +0000 UTC m=+52.728091768 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.992335 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.992371 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.992379 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.992394 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:40 crc kubenswrapper[4703]: I0130 11:56:40.992403 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:40Z","lastTransitionTime":"2026-01-30T11:56:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.086096 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:41 crc kubenswrapper[4703]: E0130 11:56:41.086249 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.094014 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.094049 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.094059 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.094075 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.094086 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:41Z","lastTransitionTime":"2026-01-30T11:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.196314 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.196360 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.196374 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.196392 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.196402 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:41Z","lastTransitionTime":"2026-01-30T11:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.196750 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 01:59:26.770989001 +0000 UTC Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.298576 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.298607 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.298615 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.298641 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.298650 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:41Z","lastTransitionTime":"2026-01-30T11:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.400747 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.400781 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.400790 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.400807 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.400816 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:41Z","lastTransitionTime":"2026-01-30T11:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.500891 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" event={"ID":"32108840-3d15-43ae-b3d1-fa5b8eb931c7","Type":"ContainerStarted","Data":"21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2"} Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.502634 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.502696 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.502722 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.502754 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.502795 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:41Z","lastTransitionTime":"2026-01-30T11:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.508487 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerStarted","Data":"a98ccce51dc68203db0737e0762d1f807acc46e171a5d2df8e1d516dff43e515"} Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.508816 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.525319 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.544634 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.546357 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.571768 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.592374 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.604970 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.605005 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.605016 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.605033 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.605045 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:41Z","lastTransitionTime":"2026-01-30T11:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.643569 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.667280 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts
\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host
-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168
.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.680360 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"
name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.689934 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.703412 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.707147 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.707175 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.707183 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.707198 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.707208 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:41Z","lastTransitionTime":"2026-01-30T11:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.721054 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.734580 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.747826 4703 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.759442 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.770946 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.783562 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.794186 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.805416 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.808888 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.808929 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.808941 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.808957 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.808969 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:41Z","lastTransitionTime":"2026-01-30T11:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.818786 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.835264 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9
8100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.855301 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a98ccce51dc68203db0737e0762d1f807acc46e1
71a5d2df8e1d516dff43e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.867380 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.876667 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.891926 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.905728 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.912243 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.912282 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.912294 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.912311 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.912323 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:41Z","lastTransitionTime":"2026-01-30T11:56:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.921214 4703 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.921516 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.934906 4703 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.948521 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:41 crc kubenswrapper[4703]: I0130 11:56:41.962905 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:41Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.014368 4703 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.014430 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.014446 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.014464 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.014476 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:42Z","lastTransitionTime":"2026-01-30T11:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.085303 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.085342 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:42 crc kubenswrapper[4703]: E0130 11:56:42.085448 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:56:42 crc kubenswrapper[4703]: E0130 11:56:42.085529 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.117764 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.117816 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.117828 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.117847 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.117861 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:42Z","lastTransitionTime":"2026-01-30T11:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.197596 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 19:35:32.365757446 +0000 UTC Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.221417 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.221482 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.221494 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.221517 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.221532 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:42Z","lastTransitionTime":"2026-01-30T11:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.239297 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.239342 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.239354 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.239374 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.239386 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:42Z","lastTransitionTime":"2026-01-30T11:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:42 crc kubenswrapper[4703]: E0130 11:56:42.274584 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.278670 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.278707 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
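The payload in the failed node-status update above is a strategic merge patch: "$setElementOrder/conditions" pins the ordering of the merged conditions list, and each entry under "conditions" is merged into the existing list by its "type" key, so the kubelet only has to send the conditions that changed. A minimal sketch of how such a payload is shaped, with values copied from the entry above (illustrative, not the kubelet's actual marshaling code):

// patchsketch.go: builds a node-status strategic merge patch shaped like
// the one the kubelet sends above. Values copied from the log entry.
package main

import (
        "encoding/json"
        "fmt"
)

func main() {
        // The full ordered condition list, even though only Ready changes here.
        order := []map[string]string{
                {"type": "MemoryPressure"}, {"type": "DiskPressure"},
                {"type": "PIDPressure"}, {"type": "Ready"},
        }
        ready := map[string]string{
                "type":               "Ready",
                "status":             "False",
                "reason":             "KubeletNotReady",
                "lastHeartbeatTime":  "2026-01-30T11:56:42Z",
                "lastTransitionTime": "2026-01-30T11:56:42Z",
                "message":            "container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?",
        }
        patch := map[string]any{
                "status": map[string]any{
                        // Directive consumed by the apiserver's strategic merge logic.
                        "$setElementOrder/conditions": order,
                        // Merged by the "type" key; unchanged conditions are omitted.
                        "conditions": []any{ready},
                },
        }
        b, err := json.Marshal(patch)
        if err != nil {
                panic(err)
        }
        fmt.Println(string(b))
}

Here the patch never reaches the merge step: the node.network-node-identity.openshift.io admission webhook rejects the request first, for the same expired certificate as the pod patches.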
event="NodeHasNoDiskPressure" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.278718 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.278733 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.278744 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:42Z","lastTransitionTime":"2026-01-30T11:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:42 crc kubenswrapper[4703]: E0130 11:56:42.294230 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.299936 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.299972 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
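The NotReady condition repeated throughout comes from the container runtime's network-readiness probe: CRI-O reports NetworkReady=false while /etc/kubernetes/cni/net.d/ contains no CNI configuration, which on this node persists until ovnkube-node writes one. A rough stand-in for that probe follows; the real logic lives in CRI-O/libcni, and this only mimics the idea using the directory named in the log:

// cnicheck.go: approximates the readiness test behind "no CNI configuration
// file in /etc/kubernetes/cni/net.d/". Sketch only, not CRI-O code.
package main

import (
        "fmt"
        "os"
        "path/filepath"
)

func main() {
        confDir := "/etc/kubernetes/cni/net.d" // directory from the log message
        var confs []string
        // libcni-style config discovery: conf, conflist, and json files.
        for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
                m, _ := filepath.Glob(filepath.Join(confDir, pat))
                confs = append(confs, m...)
        }
        if len(confs) == 0 {
                fmt.Println("NetworkReady=false: no CNI configuration file in", confDir)
                os.Exit(1)
        }
        fmt.Println("found CNI config(s):", confs)
}

Once the ovnkube-node containers settle and a config file appears, NetworkReady flips to true and the NodeNotReady event stream above stops.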
event="NodeHasNoDiskPressure" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.299983 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.300002 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.300013 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:42Z","lastTransitionTime":"2026-01-30T11:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:42 crc kubenswrapper[4703]: E0130 11:56:42.314787 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.318678 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.318705 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.318712 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.318726 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.318735 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:42Z","lastTransitionTime":"2026-01-30T11:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:42 crc kubenswrapper[4703]: E0130 11:56:42.330962 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.339944 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.340021 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.340039 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.340063 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.340079 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:42Z","lastTransitionTime":"2026-01-30T11:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:42 crc kubenswrapper[4703]: E0130 11:56:42.370067 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: E0130 11:56:42.370226 4703 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.372805 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.372841 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.372851 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.372869 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.372879 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:42Z","lastTransitionTime":"2026-01-30T11:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.475582 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.475644 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.475659 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.475682 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.475699 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:42Z","lastTransitionTime":"2026-01-30T11:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.511497 4703 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.512100 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.579356 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.579402 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.579411 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.579426 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.579435 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:42Z","lastTransitionTime":"2026-01-30T11:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.592922 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.608728 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.622559 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.636306 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.652393 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.669267 4703 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.681873 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.682094 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.682141 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.682166 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.682180 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:42Z","lastTransitionTime":"2026-01-30T11:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.684692 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.705474 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.717010 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.731265 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.747809 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a98ccce51dc68203db0737e0762d1f807acc46e1
71a5d2df8e1d516dff43e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.756662 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.767412 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.775325 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\
\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.784616 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.784642 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.784650 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.784666 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.784679 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:42Z","lastTransitionTime":"2026-01-30T11:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.789215 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:42Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.888230 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.888289 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.888299 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.888339 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.888354 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:42Z","lastTransitionTime":"2026-01-30T11:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.913445 4703 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.991269 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.991307 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.991318 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.991335 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:42 crc kubenswrapper[4703]: I0130 11:56:42.991345 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:42Z","lastTransitionTime":"2026-01-30T11:56:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.085943 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:43 crc kubenswrapper[4703]: E0130 11:56:43.086162 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.087221 4703 scope.go:117] "RemoveContainer" containerID="173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.093065 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.093181 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.093197 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.093219 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.093235 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:43Z","lastTransitionTime":"2026-01-30T11:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.196482 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.198832 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 15:29:06.114613622 +0000 UTC Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.198959 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.198984 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.199008 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.199021 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:43Z","lastTransitionTime":"2026-01-30T11:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.302402 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.302457 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.302470 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.302505 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.302519 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:43Z","lastTransitionTime":"2026-01-30T11:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.406794 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.406836 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.406846 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.406876 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.406891 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:43Z","lastTransitionTime":"2026-01-30T11:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.510848 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.510893 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.510905 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.510926 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.510938 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:43Z","lastTransitionTime":"2026-01-30T11:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.519829 4703 generic.go:334] "Generic (PLEG): container finished" podID="32108840-3d15-43ae-b3d1-fa5b8eb931c7" containerID="21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2" exitCode=0 Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.519920 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" event={"ID":"32108840-3d15-43ae-b3d1-fa5b8eb931c7","Type":"ContainerDied","Data":"21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2"} Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.520186 4703 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.550289 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:43Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.586146 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:43Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.606051 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:43Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.615502 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.615565 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.615578 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.615603 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.615618 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:43Z","lastTransitionTime":"2026-01-30T11:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.621495 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:43Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.639159 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:43Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.660416 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a98ccce51dc68203db0737e0762d1f807acc46e1
71a5d2df8e1d516dff43e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:43Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.667104 4703 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.673636 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:43Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.690838 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:43Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.704174 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:43Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.719369 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.719432 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.719448 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:43 crc 
kubenswrapper[4703]: I0130 11:56:43.719486 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.719503 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:43Z","lastTransitionTime":"2026-01-30T11:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.724110 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:43Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.739108 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:43Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.753747 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:43Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.768468 4703 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:43Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.781635 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:43Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.823363 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.823435 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.823444 4703 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.823467 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.823479 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:43Z","lastTransitionTime":"2026-01-30T11:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.926615 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.926663 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.926674 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.926694 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:43 crc kubenswrapper[4703]: I0130 11:56:43.926707 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:43Z","lastTransitionTime":"2026-01-30T11:56:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.031411 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.031900 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.031914 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.031938 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.031950 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:44Z","lastTransitionTime":"2026-01-30T11:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.085430 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.085491 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:44 crc kubenswrapper[4703]: E0130 11:56:44.085661 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:56:44 crc kubenswrapper[4703]: E0130 11:56:44.085901 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.134594 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.134647 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.134663 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.134698 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.134714 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:44Z","lastTransitionTime":"2026-01-30T11:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.199661 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 17:52:01.01572298 +0000 UTC Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.238426 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.238471 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.238480 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.238517 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.238534 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:44Z","lastTransitionTime":"2026-01-30T11:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.341672 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.341716 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.341728 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.341746 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.341759 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:44Z","lastTransitionTime":"2026-01-30T11:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.444173 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.444218 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.444228 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.444245 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.444257 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:44Z","lastTransitionTime":"2026-01-30T11:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.528489 4703 generic.go:334] "Generic (PLEG): container finished" podID="32108840-3d15-43ae-b3d1-fa5b8eb931c7" containerID="8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef" exitCode=0 Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.528582 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" event={"ID":"32108840-3d15-43ae-b3d1-fa5b8eb931c7","Type":"ContainerDied","Data":"8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef"} Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.530906 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.532664 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f"} Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.532808 4703 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.533421 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.544107 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.547659 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.547691 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.547701 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.547720 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.547732 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:44Z","lastTransitionTime":"2026-01-30T11:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.560477 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.573910 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.587667 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.605909 4703 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.633415 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.646368 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.650769 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.650800 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.650811 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.650828 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.650839 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:44Z","lastTransitionTime":"2026-01-30T11:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.664488 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.680485 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\
"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\"
:\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.711602 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a98ccce51dc68203db0737e0762d1f807acc46e171a5d2df8e1d516dff43e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.730399 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the 
pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.740614 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current 
time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.754297 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.754368 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.754354 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"nam
e\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.754388 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.754611 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.754636 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:44Z","lastTransitionTime":"2026-01-30T11:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.770100 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.773401 4703 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.785028 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.808141 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.822963 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.837612 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.857168 4703 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.857198 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.857208 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.857223 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.857235 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:44Z","lastTransitionTime":"2026-01-30T11:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.859356 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.879816 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.895729 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.910165 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.925705 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.950770 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a98ccce51dc68203db0737e0762d1f807acc46e1
71a5d2df8e1d516dff43e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.960543 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.960573 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.960581 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.960596 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.960605 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:44Z","lastTransitionTime":"2026-01-30T11:56:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.970757 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.984682 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:44 crc kubenswrapper[4703]: I0130 11:56:44.998587 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:44Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.020552 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.062543 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.062572 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.062584 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.062601 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.062610 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:45Z","lastTransitionTime":"2026-01-30T11:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.153669 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:45 crc kubenswrapper[4703]: E0130 11:56:45.153851 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.165226 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.165280 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.165292 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.165313 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.165326 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:45Z","lastTransitionTime":"2026-01-30T11:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.172954 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:45Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.187503 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.199852 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 16:09:16.294782997 +0000 UTC Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.202680 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.214311 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.231583 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.245200 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.258779 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.280656 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.280701 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.280712 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.280734 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.280745 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:45Z","lastTransitionTime":"2026-01-30T11:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.292155 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.305082 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026
-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.318308 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.331552 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.357883 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.374902 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.382836 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.382876 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.382888 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.382907 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.382919 4703 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:45Z","lastTransitionTime":"2026-01-30T11:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.434525 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\
":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a98ccce51dc68203db0737e0762d1f807acc46e171a5d2df8e1d516dff43e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\
":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 
11:56:45.485393 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.485437 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.485447 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.485464 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.485476 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:45Z","lastTransitionTime":"2026-01-30T11:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.665967 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.665993 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.666000 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.666013 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.666021 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:45Z","lastTransitionTime":"2026-01-30T11:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.768578 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.768633 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.768662 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.768682 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.768694 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:45Z","lastTransitionTime":"2026-01-30T11:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.871660 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.871711 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.871725 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.871746 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.871760 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:45Z","lastTransitionTime":"2026-01-30T11:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.986427 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.986487 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.986502 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.986525 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:56:45 crc kubenswrapper[4703]: I0130 11:56:45.986542 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:45Z","lastTransitionTime":"2026-01-30T11:56:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.085301 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:56:46 crc kubenswrapper[4703]: E0130 11:56:46.085446 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.085663 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:56:46 crc kubenswrapper[4703]: E0130 11:56:46.085884 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.089286 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.089330 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.089344 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.089361 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.089372 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:46Z","lastTransitionTime":"2026-01-30T11:56:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.191989 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.192044 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.192053 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.192456 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.192492 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:46Z","lastTransitionTime":"2026-01-30T11:56:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.199294 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6"]
Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.199873 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.200065 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 01:04:56.504618538 +0000 UTC Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.202175 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.202308 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.213924 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1
d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:46Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.226939 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:46Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.241931 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:46Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.257602 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:46Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.269478 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1bc71a36-b76e-48cf-a2a5-34b8251b7e15-env-overrides\") pod \"ovnkube-control-plane-749d76644c-g29v6\" (UID: \"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.269525 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1bc71a36-b76e-48cf-a2a5-34b8251b7e15-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-g29v6\" (UID: \"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.269551 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlvjk\" (UniqueName: \"kubernetes.io/projected/1bc71a36-b76e-48cf-a2a5-34b8251b7e15-kube-api-access-vlvjk\") pod \"ovnkube-control-plane-749d76644c-g29v6\" (UID: \"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 
11:56:46.269573 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1bc71a36-b76e-48cf-a2a5-34b8251b7e15-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-g29v6\" (UID: \"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.269979 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-3
0T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:46Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.282530 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont
/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:46Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.294116 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.294156 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.294164 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.294177 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.294187 4703 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:46Z","lastTransitionTime":"2026-01-30T11:56:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.302242 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\
":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a98ccce51dc68203db0737e0762d1f807acc46e171a5d2df8e1d516dff43e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\
":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:46Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 
11:56:46.319080 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:46Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.329608 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:46Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.345898 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:46Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.360587 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:46Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.370431 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1bc71a36-b76e-48cf-a2a5-34b8251b7e15-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-g29v6\" (UID: \"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.370499 4703 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-vlvjk\" (UniqueName: \"kubernetes.io/projected/1bc71a36-b76e-48cf-a2a5-34b8251b7e15-kube-api-access-vlvjk\") pod \"ovnkube-control-plane-749d76644c-g29v6\" (UID: \"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.370536 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1bc71a36-b76e-48cf-a2a5-34b8251b7e15-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-g29v6\" (UID: \"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.370580 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1bc71a36-b76e-48cf-a2a5-34b8251b7e15-env-overrides\") pod \"ovnkube-control-plane-749d76644c-g29v6\" (UID: \"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.371184 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/1bc71a36-b76e-48cf-a2a5-34b8251b7e15-env-overrides\") pod \"ovnkube-control-plane-749d76644c-g29v6\" (UID: \"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.371491 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/1bc71a36-b76e-48cf-a2a5-34b8251b7e15-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-g29v6\" (UID: \"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.376080 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/1bc71a36-b76e-48cf-a2a5-34b8251b7e15-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-g29v6\" (UID: \"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.378709 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:46Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.392478 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:46Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.396065 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.396104 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.396139 4703 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.396157 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.396169 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:46Z","lastTransitionTime":"2026-01-30T11:56:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.396859 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlvjk\" (UniqueName: \"kubernetes.io/projected/1bc71a36-b76e-48cf-a2a5-34b8251b7e15-kube-api-access-vlvjk\") pod \"ovnkube-control-plane-749d76644c-g29v6\" (UID: \"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.407263 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:46Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.418664 4703 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:46Z is after 2025-08-24T17:21:41Z" Jan 
30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.499056 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.499105 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.499117 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.499152 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.499165 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:46Z","lastTransitionTime":"2026-01-30T11:56:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.514532 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" Jan 30 11:56:46 crc kubenswrapper[4703]: W0130 11:56:46.529099 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1bc71a36_b76e_48cf_a2a5_34b8251b7e15.slice/crio-2647069a3391a9224418bb2bc5a129148d73a8d7ead3e716bc3e6de29718a69e WatchSource:0}: Error finding container 2647069a3391a9224418bb2bc5a129148d73a8d7ead3e716bc3e6de29718a69e: Status 404 returned error can't find the container with id 2647069a3391a9224418bb2bc5a129148d73a8d7ead3e716bc3e6de29718a69e Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.601919 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.601976 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.601986 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.602006 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.602017 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:46Z","lastTransitionTime":"2026-01-30T11:56:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.671732 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" event={"ID":"1bc71a36-b76e-48cf-a2a5-34b8251b7e15","Type":"ContainerStarted","Data":"2647069a3391a9224418bb2bc5a129148d73a8d7ead3e716bc3e6de29718a69e"} Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.704565 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.704609 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.704621 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.704638 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.704649 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:46Z","lastTransitionTime":"2026-01-30T11:56:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.806910 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.806957 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.806974 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.806991 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.807003 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:46Z","lastTransitionTime":"2026-01-30T11:56:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.910277 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.910315 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.910325 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.910342 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:46 crc kubenswrapper[4703]: I0130 11:56:46.910350 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:46Z","lastTransitionTime":"2026-01-30T11:56:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.013548 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.013588 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.013597 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.013616 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.013626 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:47Z","lastTransitionTime":"2026-01-30T11:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.086749 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:47 crc kubenswrapper[4703]: E0130 11:56:47.087015 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.115916 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.115961 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.115972 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.115990 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.116034 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:47Z","lastTransitionTime":"2026-01-30T11:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.200785 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 08:52:55.421336175 +0000 UTC Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.218788 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.218826 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.218834 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.218851 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.218861 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:47Z","lastTransitionTime":"2026-01-30T11:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.321227 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.321510 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.321520 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.321537 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.321547 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:47Z","lastTransitionTime":"2026-01-30T11:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.423296 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.423337 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.423345 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.423384 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.423394 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:47Z","lastTransitionTime":"2026-01-30T11:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.526337 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.526374 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.526384 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.526400 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.526410 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:47Z","lastTransitionTime":"2026-01-30T11:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.629729 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.629778 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.629791 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.629810 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.629823 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:47Z","lastTransitionTime":"2026-01-30T11:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.678696 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" event={"ID":"32108840-3d15-43ae-b3d1-fa5b8eb931c7","Type":"ContainerStarted","Data":"cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0"} Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.680836 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" event={"ID":"1bc71a36-b76e-48cf-a2a5-34b8251b7e15","Type":"ContainerStarted","Data":"adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c"} Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.692356 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:47Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.703672 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:47Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.715089 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:47Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.732639 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.732677 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.732688 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.732704 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.732740 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:47Z","lastTransitionTime":"2026-01-30T11:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.743903 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:47Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.757335 4703 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:47Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.772986 4703 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:47Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.826916 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a98ccce51dc68203db0737e0762d1f807acc46e171a5d2df8e1d516dff43e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:47Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.839351 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.839386 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.839394 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.839412 4703 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.839422 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:47Z","lastTransitionTime":"2026-01-30T11:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.847065 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:47Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.859838 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-30T11:56:47Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.870341 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:47Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.880316 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:47Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.893364 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:47Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.906865 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:47Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.931489 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:47Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.942556 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.942594 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.942602 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.942620 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.942632 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:47Z","lastTransitionTime":"2026-01-30T11:56:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:56:47 crc kubenswrapper[4703]: I0130 11:56:47.946518 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:47Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.045065 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.045113 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.045140 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.045160 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.045175 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:48Z","lastTransitionTime":"2026-01-30T11:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.085330 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.085400 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:56:48 crc kubenswrapper[4703]: E0130 11:56:48.085479 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:56:48 crc kubenswrapper[4703]: E0130 11:56:48.085554 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.106660 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-qrt92"]
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.107247 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:56:48 crc kubenswrapper[4703]: E0130 11:56:48.107325 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.113717 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs\") pod \"network-metrics-daemon-qrt92\" (UID: \"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\") " pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.113788 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pmjt\" (UniqueName: \"kubernetes.io/projected/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-kube-api-access-2pmjt\") pod \"network-metrics-daemon-qrt92\" (UID: \"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\") " pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.120518 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.133557 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.144327 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.147995 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.148049 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.148065 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.148086 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.148099 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:48Z","lastTransitionTime":"2026-01-30T11:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.157326 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.171470 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.182670 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.195275 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.201616 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 07:39:07.532204114 +0000 UTC
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.206627 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.214885 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pmjt\" (UniqueName: \"kubernetes.io/projected/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-kube-api-access-2pmjt\") pod \"network-metrics-daemon-qrt92\" (UID: \"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\") " pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.214949 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs\") pod \"network-metrics-daemon-qrt92\" (UID: \"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\") " pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:56:48 crc kubenswrapper[4703]: E0130 11:56:48.215034 4703 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 30 11:56:48 crc kubenswrapper[4703]: E0130 11:56:48.215092 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs podName:ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd nodeName:}" failed. No retries permitted until 2026-01-30 11:56:48.715079828 +0000 UTC m=+44.492901482 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs") pod "network-metrics-daemon-qrt92" (UID: "ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.217770 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.234226 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.235490 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pmjt\" (UniqueName: \"kubernetes.io/projected/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-kube-api-access-2pmjt\") pod \"network-metrics-daemon-qrt92\" (UID: \"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\") " pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.246279 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.251969 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.252025 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.252043 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.252068 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.252087 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:48Z","lastTransitionTime":"2026-01-30T11:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.261328 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.276961 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.291492 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.314796 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a98ccce51dc68203db0737e0762d1f807acc46e1
71a5d2df8e1d516dff43e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.329623 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.354812 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.354874 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.354890 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.354907 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.354919 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:48Z","lastTransitionTime":"2026-01-30T11:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.457980 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.458023 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.458035 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.458056 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.458108 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:48Z","lastTransitionTime":"2026-01-30T11:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.560400 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.560442 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.560453 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.560470 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.560482 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:48Z","lastTransitionTime":"2026-01-30T11:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.663069 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.663183 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.663207 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.663275 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.663302 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:48Z","lastTransitionTime":"2026-01-30T11:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.686735 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" event={"ID":"1bc71a36-b76e-48cf-a2a5-34b8251b7e15","Type":"ContainerStarted","Data":"0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda"} Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.689392 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovnkube-controller/0.log" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.693624 4703 generic.go:334] "Generic (PLEG): container finished" podID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerID="a98ccce51dc68203db0737e0762d1f807acc46e171a5d2df8e1d516dff43e515" exitCode=1 Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.693680 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerDied","Data":"a98ccce51dc68203db0737e0762d1f807acc46e171a5d2df8e1d516dff43e515"} Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.694501 4703 scope.go:117] "RemoveContainer" containerID="a98ccce51dc68203db0737e0762d1f807acc46e171a5d2df8e1d516dff43e515" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.719462 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs\") pod \"network-metrics-daemon-qrt92\" (UID: \"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\") " pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:56:48 crc kubenswrapper[4703]: E0130 11:56:48.719868 4703 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 11:56:48 crc kubenswrapper[4703]: E0130 11:56:48.719999 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs podName:ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd nodeName:}" failed. No retries permitted until 2026-01-30 11:56:49.719970715 +0000 UTC m=+45.497792419 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs") pod "network-metrics-daemon-qrt92" (UID: "ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.722613 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnl
y\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"contai
nerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a98ccce51dc68203db0737e0762d1f807acc46e171a5d2df8e1d516dff43e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvs
witch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.736012 4703 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.751350 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.764959 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.766223 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.766252 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.766263 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.766278 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.766289 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:48Z","lastTransitionTime":"2026-01-30T11:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.780963 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.791737 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.810963 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.826654 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.841726 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.854529 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.868848 4703 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.868900 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.868914 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.868932 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.868945 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:48Z","lastTransitionTime":"2026-01-30T11:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.871192 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.885400 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.900269 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.913625 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.931418 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.945616 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\"
:true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.958791 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":
{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.972076 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.972141 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.972163 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.972144 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.972181 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.972286 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:48Z","lastTransitionTime":"2026-01-30T11:56:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:48 crc kubenswrapper[4703]: I0130 11:56:48.989418 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:48Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.004904 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:49Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.018427 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:49Z is after 2025-08-24T17:21:41Z" Jan 30 
11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.033113 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:49Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.043525 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:49Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.066231 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a98ccce51dc68203db0737e0762d1f807acc46e171a5d2df8e1d516dff43e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a98ccce51dc68203db0737e0762d1f807acc46e171a5d2df8e1d516dff43e515\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"message\\\":\\\" 11:56:47.639094 5910 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0130 11:56:47.639106 5910 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0130 11:56:47.639103 5910 handler.go:208] Removed *v1.Node event handler 7\\\\nI0130 11:56:47.639141 5910 handler.go:208] Removed *v1.Node event handler 2\\\\nI0130 11:56:47.639153 5910 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0130 11:56:47.639157 5910 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0130 11:56:47.639171 5910 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0130 11:56:47.639238 5910 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0130 11:56:47.639307 5910 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0130 11:56:47.639343 5910 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0130 11:56:47.639406 5910 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0130 11:56:47.639348 5910 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0130 11:56:47.639457 5910 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0130 11:56:47.639493 5910 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0130 11:56:47.639513 5910 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0130 11:56:47.639504 5910 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099
482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:49Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.074982 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.075036 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.075094 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.075113 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.075160 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:49Z","lastTransitionTime":"2026-01-30T11:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.081197 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:49Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.086221 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:49 crc kubenswrapper[4703]: E0130 11:56:49.086348 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.091608 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:49Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.105464 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:49Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.115309 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:49Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.128071 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:49Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.145360 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:49Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.160767 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:49Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.178095 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.178185 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.178203 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.178232 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.178246 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:49Z","lastTransitionTime":"2026-01-30T11:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.178212 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:49Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.202470 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 06:48:57.236370168 +0000 UTC Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.281064 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.281183 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.281209 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.281241 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.281263 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:49Z","lastTransitionTime":"2026-01-30T11:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.384394 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.384448 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.384456 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.384471 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.384484 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:49Z","lastTransitionTime":"2026-01-30T11:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.486669 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.486710 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.486720 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.486735 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.486743 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:49Z","lastTransitionTime":"2026-01-30T11:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.589041 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.589092 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.589110 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.589145 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.589159 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:49Z","lastTransitionTime":"2026-01-30T11:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.694811 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.694890 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.694941 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.695022 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.695050 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:49Z","lastTransitionTime":"2026-01-30T11:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.794778 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs\") pod \"network-metrics-daemon-qrt92\" (UID: \"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\") " pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:56:49 crc kubenswrapper[4703]: E0130 11:56:49.794959 4703 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 11:56:49 crc kubenswrapper[4703]: E0130 11:56:49.795012 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs podName:ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd nodeName:}" failed. No retries permitted until 2026-01-30 11:56:51.794997157 +0000 UTC m=+47.572818811 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs") pod "network-metrics-daemon-qrt92" (UID: "ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.896431 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.896472 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.896493 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.896512 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.896525 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:49Z","lastTransitionTime":"2026-01-30T11:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.998607 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.998647 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.998689 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.998709 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:49 crc kubenswrapper[4703]: I0130 11:56:49.998750 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:49Z","lastTransitionTime":"2026-01-30T11:56:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.086315 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.086381 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.086419 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:56:50 crc kubenswrapper[4703]: E0130 11:56:50.086468 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:56:50 crc kubenswrapper[4703]: E0130 11:56:50.086591 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:56:50 crc kubenswrapper[4703]: E0130 11:56:50.086718 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.100538 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.100576 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.100587 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.100608 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.100629 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:50Z","lastTransitionTime":"2026-01-30T11:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.202676 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 05:42:25.839023576 +0000 UTC Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.203044 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.203100 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.203112 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.203154 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.203168 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:50Z","lastTransitionTime":"2026-01-30T11:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.305361 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.305418 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.305429 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.305446 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.305457 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:50Z","lastTransitionTime":"2026-01-30T11:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.407960 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.408370 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.408387 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.408408 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.408420 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:50Z","lastTransitionTime":"2026-01-30T11:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.509984 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.510013 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.510021 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.510036 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.510044 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:50Z","lastTransitionTime":"2026-01-30T11:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.612412 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.612453 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.612465 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.612481 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.612494 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:50Z","lastTransitionTime":"2026-01-30T11:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.714871 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.714908 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.714920 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.714935 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.714943 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:50Z","lastTransitionTime":"2026-01-30T11:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.798706 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovnkube-controller/0.log" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.801553 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerStarted","Data":"b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468"} Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.801705 4703 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.816984 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.817010 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.817018 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.817035 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.817045 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:50Z","lastTransitionTime":"2026-01-30T11:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.818858 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:50Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.832530 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:50Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.914304 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:50Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.919321 4703 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.919361 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.919373 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.919398 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.919410 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:50Z","lastTransitionTime":"2026-01-30T11:56:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:50 crc kubenswrapper[4703]: I0130 11:56:50.990670 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:50Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.004265 4703 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.017078 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.021804 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.021846 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.021856 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.021871 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.021881 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:51Z","lastTransitionTime":"2026-01-30T11:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.033284 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.044916 4703 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.061099 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-
apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.074471 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.085801 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:51 crc kubenswrapper[4703]: E0130 11:56:51.085931 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.099698 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47
ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a98ccce51dc68203db0737e0762d1f807acc46e171a5d2df8e1d516dff43e515\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"message\\\":\\\" 11:56:47.639094 5910 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0130 11:56:47.639106 5910 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0130 11:56:47.639103 5910 handler.go:208] Removed *v1.Node event handler 7\\\\nI0130 11:56:47.639141 5910 handler.go:208] Removed *v1.Node event handler 2\\\\nI0130 11:56:47.639153 5910 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0130 11:56:47.639157 5910 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0130 11:56:47.639171 5910 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0130 11:56:47.639238 5910 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0130 11:56:47.639307 5910 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0130 11:56:47.639343 5910 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0130 11:56:47.639406 5910 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0130 11:56:47.639348 5910 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0130 11:56:47.639457 5910 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0130 11:56:47.639493 5910 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0130 11:56:47.639513 5910 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0130 11:56:47.639504 5910 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.119398 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.124483 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.124523 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.124532 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.124548 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.124558 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:51Z","lastTransitionTime":"2026-01-30T11:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.134269 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.146424 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.161193 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.172351 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.203504 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 22:04:35.992504944 +0000 UTC Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.226921 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.226982 4703 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.227003 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.227020 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.227032 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:51Z","lastTransitionTime":"2026-01-30T11:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.329053 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.329093 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.329104 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.329137 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.329151 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:51Z","lastTransitionTime":"2026-01-30T11:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.431756 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.431793 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.431804 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.431822 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.431833 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:51Z","lastTransitionTime":"2026-01-30T11:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.534563 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.534614 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.534628 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.534644 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.534656 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:51Z","lastTransitionTime":"2026-01-30T11:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.637397 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.637461 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.637483 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.637506 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.637520 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:51Z","lastTransitionTime":"2026-01-30T11:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.740278 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.740350 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.740368 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.740400 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.740419 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:51Z","lastTransitionTime":"2026-01-30T11:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.808185 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovnkube-controller/1.log" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.809040 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovnkube-controller/0.log" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.812484 4703 generic.go:334] "Generic (PLEG): container finished" podID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerID="b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468" exitCode=1 Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.812538 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerDied","Data":"b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468"} Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.812593 4703 scope.go:117] "RemoveContainer" containerID="a98ccce51dc68203db0737e0762d1f807acc46e171a5d2df8e1d516dff43e515" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.813934 4703 scope.go:117] "RemoveContainer" containerID="b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468" Jan 30 11:56:51 crc kubenswrapper[4703]: E0130 11:56:51.814214 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.831063 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs\") pod \"network-metrics-daemon-qrt92\" (UID: \"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\") " pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:56:51 crc kubenswrapper[4703]: E0130 11:56:51.831455 4703 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 11:56:51 crc kubenswrapper[4703]: E0130 11:56:51.831586 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs podName:ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd nodeName:}" failed. No retries permitted until 2026-01-30 11:56:55.831557044 +0000 UTC m=+51.609378698 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs") pod "network-metrics-daemon-qrt92" (UID: "ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.834172 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.843509 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.843554 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.843566 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.843587 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:51 crc kubenswrapper[4703]: 
I0130 11:56:51.843599 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:51Z","lastTransitionTime":"2026-01-30T11:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.856649 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\
\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"rest
artCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.872858 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\
\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.890050 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe
2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" 
not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.907247 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.928707 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.946757 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.946815 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.946828 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.946850 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.946866 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:51Z","lastTransitionTime":"2026-01-30T11:56:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.959080 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a98ccce51dc68203db0737e0762d1f807acc46e171a5d2df8e1d516dff43e515\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"message\\\":\\\" 11:56:47.639094 5910 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0130 11:56:47.639106 5910 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0130 11:56:47.639103 5910 handler.go:208] Removed *v1.Node event handler 7\\\\nI0130 11:56:47.639141 5910 handler.go:208] Removed *v1.Node event handler 2\\\\nI0130 11:56:47.639153 5910 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0130 11:56:47.639157 5910 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0130 11:56:47.639171 5910 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0130 11:56:47.639238 5910 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0130 11:56:47.639307 5910 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0130 11:56:47.639343 5910 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0130 11:56:47.639406 5910 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0130 11:56:47.639348 5910 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0130 11:56:47.639457 5910 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0130 11:56:47.639493 5910 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0130 11:56:47.639513 5910 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0130 11:56:47.639504 5910 factory.go:656] Stopping 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:56:51Z\\\",\\\"message\\\":\\\"ng *v1.Pod event handler 6 for removal\\\\nI0130 11:56:51.219927 6167 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0130 11:56:51.219948 6167 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0130 11:56:51.219964 6167 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0130 11:56:51.221516 6167 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0130 11:56:51.221579 6167 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0130 11:56:51.221649 6167 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0130 11:56:51.221704 6167 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0130 11:56:51.221656 6167 handler.go:208] Removed *v1.Node event handler 2\\\\nI0130 11:56:51.221770 6167 handler.go:208] Removed *v1.Node event handler 7\\\\nI0130 11:56:51.221773 6167 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0130 11:56:51.221824 6167 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0130 11:56:51.221892 6167 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0130 11:56:51.221905 6167 factory.go:656] Stopping watch factory\\\\nI0130 11:56:51.221917 6167 handler.go:208] Removed *v1.EgressFirewall 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.974553 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:51 crc kubenswrapper[4703]: I0130 11:56:51.989533 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:51Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.008382 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.023435 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.043715 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.050115 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.050197 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.050210 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.050228 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.050240 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:52Z","lastTransitionTime":"2026-01-30T11:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.061360 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.077617 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.085926 4703 util.go:30] "No sandbox for pod can 
be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.085971 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.086039 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:52 crc kubenswrapper[4703]: E0130 11:56:52.086173 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:56:52 crc kubenswrapper[4703]: E0130 11:56:52.086350 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:56:52 crc kubenswrapper[4703]: E0130 11:56:52.086467 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.098967 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.119303 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.153717 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.153769 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.153789 4703 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.153811 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.153825 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:52Z","lastTransitionTime":"2026-01-30T11:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.204342 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 13:16:56.370645588 +0000 UTC Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.256880 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.256933 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.256944 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.256963 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.256976 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:52Z","lastTransitionTime":"2026-01-30T11:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.360028 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.360255 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.360269 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.360306 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.360317 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:52Z","lastTransitionTime":"2026-01-30T11:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.464506 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.464569 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.464581 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.464603 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.464615 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:52Z","lastTransitionTime":"2026-01-30T11:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.567882 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.567942 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.567955 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.567989 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.568006 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:52Z","lastTransitionTime":"2026-01-30T11:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.614700 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.614754 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.614767 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.614792 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.614803 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:52Z","lastTransitionTime":"2026-01-30T11:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:52 crc kubenswrapper[4703]: E0130 11:56:52.632027 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.637941 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.637992 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.638003 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.638030 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.638044 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:52Z","lastTransitionTime":"2026-01-30T11:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:52 crc kubenswrapper[4703]: E0130 11:56:52.654749 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.659951 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.660009 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.660025 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.660051 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.660068 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:52Z","lastTransitionTime":"2026-01-30T11:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:52 crc kubenswrapper[4703]: E0130 11:56:52.674568 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.679160 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.679209 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.679219 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.679243 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.679257 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:52Z","lastTransitionTime":"2026-01-30T11:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:52 crc kubenswrapper[4703]: E0130 11:56:52.695967 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.700595 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.700660 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.700674 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.700702 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.700718 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:52Z","lastTransitionTime":"2026-01-30T11:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:52 crc kubenswrapper[4703]: E0130 11:56:52.718181 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:52 crc kubenswrapper[4703]: E0130 11:56:52.718382 4703 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.720540 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
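The failure mode above is worth separating from the node's own credentials: every status patch dies at the admission webhook node.network-node-identity.openshift.io because the certificate served on https://127.0.0.1:9743 expired on 2025-08-24, about five months before the log's clock of 2026-01-30, and the kubelet gives up after a small fixed number of attempts (nodeStatusUpdateRetry, 5 in the upstream releases I have checked), which produces the "update node status exceeds retry count" entry. A minimal Go sketch for confirming the served certificate's validity window from the node follows; it assumes only the address taken from the log line, and it is a diagnostic probe, not kubelet code.

// certprobe.go: connect to the webhook endpoint named in the log and
// report the served certificate's validity window against the local clock.
package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		// Skip chain verification: we want to inspect the certificate
		// even though it is expired, which is exactly what the kubelet's
		// x509 error is complaining about.
		InsecureSkipVerify: true,
	})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	now := time.Now().UTC()
	fmt.Printf("subject:   %s\n", cert.Subject)
	fmt.Printf("notBefore: %s\n", cert.NotBefore.UTC())
	fmt.Printf("notAfter:  %s\n", cert.NotAfter.UTC())
	if now.After(cert.NotAfter) {
		// Matches the log: "current time ... is after 2025-08-24T17:21:41Z".
		fmt.Println("certificate expired", now.Sub(cert.NotAfter), "ago")
	}
}

Run on the node, this should print a notAfter matching the 2025-08-24T17:21:41Z in the x509 error, confirming the webhook's serving certificate, not the kubelet's client credentials, is the stale piece.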
event="NodeHasSufficientMemory" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.720600 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.720613 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.720642 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.720659 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:52Z","lastTransitionTime":"2026-01-30T11:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.817392 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovnkube-controller/1.log" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.822499 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.822526 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.822539 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.822561 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.822574 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:52Z","lastTransitionTime":"2026-01-30T11:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.924817 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.924871 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.924883 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.924905 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:52 crc kubenswrapper[4703]: I0130 11:56:52.924919 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:52Z","lastTransitionTime":"2026-01-30T11:56:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.028628 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.028674 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.028685 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.028703 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.028718 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:53Z","lastTransitionTime":"2026-01-30T11:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.085907 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:53 crc kubenswrapper[4703]: E0130 11:56:53.086206 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.131519 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.131578 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.131592 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.131642 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.131662 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:53Z","lastTransitionTime":"2026-01-30T11:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.204622 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 03:07:05.161081778 +0000 UTC Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.235030 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.235075 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.235086 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.235106 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.235144 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:53Z","lastTransitionTime":"2026-01-30T11:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.337743 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.337786 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.337805 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.337824 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.337866 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:53Z","lastTransitionTime":"2026-01-30T11:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.440312 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.440357 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.440367 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.440382 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.440391 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:53Z","lastTransitionTime":"2026-01-30T11:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.543413 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.543451 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.543461 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.543477 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.543488 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:53Z","lastTransitionTime":"2026-01-30T11:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.646325 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.646376 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.646391 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.646416 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.646429 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:53Z","lastTransitionTime":"2026-01-30T11:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.749441 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.749490 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.749505 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.749527 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.749543 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:53Z","lastTransitionTime":"2026-01-30T11:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.853179 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.853232 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.853241 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.853261 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.853273 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:53Z","lastTransitionTime":"2026-01-30T11:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.956629 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.956668 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.956677 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.956691 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:53 crc kubenswrapper[4703]: I0130 11:56:53.956701 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:53Z","lastTransitionTime":"2026-01-30T11:56:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.060728 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.060801 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.060816 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.060841 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.060857 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:54Z","lastTransitionTime":"2026-01-30T11:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.086198 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.086294 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.086198 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:56:54 crc kubenswrapper[4703]: E0130 11:56:54.086448 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:56:54 crc kubenswrapper[4703]: E0130 11:56:54.086577 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:56:54 crc kubenswrapper[4703]: E0130 11:56:54.086717 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.164813 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.164901 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.164927 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.164961 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.164986 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:54Z","lastTransitionTime":"2026-01-30T11:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.205455 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 02:37:54.676005869 +0000 UTC Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.268242 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.268328 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.268343 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.268370 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.268388 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:54Z","lastTransitionTime":"2026-01-30T11:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.372058 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.372144 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.372171 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.372194 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.372210 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:54Z","lastTransitionTime":"2026-01-30T11:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.383928 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.398224 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.404342 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.424822 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.437902 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.453504 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.467646 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.473967 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.474018 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.474032 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.474051 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.474065 4703 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:54Z","lastTransitionTime":"2026-01-30T11:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.484200 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.498496 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.511097 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.525704 4703 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z"
Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.534481 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf"
Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.535489 4703 scope.go:117] "RemoveContainer" containerID="b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468"
Jan 30 11:56:54 crc kubenswrapper[4703]: E0130 11:56:54.535682 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc"
Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.540647 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status:
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.557094 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.570877 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.576170 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.576220 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.576238 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.576260 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.576275 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:54Z","lastTransitionTime":"2026-01-30T11:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.585072 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.603852 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.624622 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a98ccce51dc68203db0737e0762d1f807acc46e171a5d2df8e1d516dff43e515\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"message\\\":\\\" 11:56:47.639094 5910 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0130 11:56:47.639106 5910 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0130 11:56:47.639103 5910 handler.go:208] Removed *v1.Node event handler 7\\\\nI0130 11:56:47.639141 5910 handler.go:208] Removed *v1.Node event handler 2\\\\nI0130 11:56:47.639153 5910 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0130 11:56:47.639157 5910 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0130 11:56:47.639171 5910 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0130 11:56:47.639238 5910 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0130 11:56:47.639307 5910 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0130 11:56:47.639343 5910 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0130 11:56:47.639406 5910 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0130 11:56:47.639348 5910 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0130 11:56:47.639457 5910 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0130 11:56:47.639493 5910 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0130 11:56:47.639513 5910 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0130 11:56:47.639504 5910 factory.go:656] Stopping \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:56:51Z\\\",\\\"message\\\":\\\"ng *v1.Pod event handler 6 for removal\\\\nI0130 11:56:51.219927 6167 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0130 11:56:51.219948 6167 handler.go:208] Removed *v1.Pod 
event handler 3\\\\nI0130 11:56:51.219964 6167 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0130 11:56:51.221516 6167 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0130 11:56:51.221579 6167 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0130 11:56:51.221649 6167 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0130 11:56:51.221704 6167 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0130 11:56:51.221656 6167 handler.go:208] Removed *v1.Node event handler 2\\\\nI0130 11:56:51.221770 6167 handler.go:208] Removed *v1.Node event handler 7\\\\nI0130 11:56:51.221773 6167 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0130 11:56:51.221824 6167 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0130 11:56:51.221892 6167 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0130 11:56:51.221905 6167 factory.go:656] Stopping watch factory\\\\nI0130 11:56:51.221917 6167 handler.go:208] Removed *v1.EgressFirewall ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"qua
y.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.637048 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.658356 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:56:51Z\\\",\\\"message\\\":\\\"ng *v1.Pod event handler 6 for removal\\\\nI0130 11:56:51.219927 6167 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0130 11:56:51.219948 6167 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0130 11:56:51.219964 6167 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0130 11:56:51.221516 6167 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0130 11:56:51.221579 6167 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0130 11:56:51.221649 6167 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0130 11:56:51.221704 6167 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0130 11:56:51.221656 6167 handler.go:208] Removed *v1.Node event handler 2\\\\nI0130 11:56:51.221770 6167 handler.go:208] Removed *v1.Node event handler 7\\\\nI0130 11:56:51.221773 6167 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0130 11:56:51.221824 6167 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0130 11:56:51.221892 6167 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0130 11:56:51.221905 6167 factory.go:656] Stopping watch factory\\\\nI0130 11:56:51.221917 6167 handler.go:208] Removed *v1.EgressFirewall ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.671555 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.678537 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.678607 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.678619 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.678640 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.678653 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:54Z","lastTransitionTime":"2026-01-30T11:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.685669 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.701778 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.714981 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.735722 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33ae9b16-ffdb-4338-ba98-8da799fa7591\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb1699595c372168389cb480f8c41f41a23f856d321138a04599628f1d4e19cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdf1201e129d6158cd5ad6dfb3e93f5ec2a5e75c738edd2dc3bd197e813d6ac5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39eca81eb300b1b4d7a68731db92f76c91270b0bf49f7ae9bcf9643559bcb722\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.754655 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.772285 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.781382 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.781459 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.781482 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.781512 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.781536 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:54Z","lastTransitionTime":"2026-01-30T11:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.785381 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running
\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.801315 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.815330 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.830444 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.850025 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.860331 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.864875 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 
2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.882157 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"
started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.883737 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.883797 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.883810 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.883832 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.883845 4703 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:54Z","lastTransitionTime":"2026-01-30T11:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.897821 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.914837 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.930070 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.947591 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\
\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.961888 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.976690 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.986970 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.987034 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.987048 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.987070 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.987086 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:54Z","lastTransitionTime":"2026-01-30T11:56:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:54 crc kubenswrapper[4703]: I0130 11:56:54.995727 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:54Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.017625 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.039222 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:56:51Z\\\",\\\"message\\\":\\\"ng *v1.Pod event handler 6 for removal\\\\nI0130 11:56:51.219927 6167 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0130 11:56:51.219948 6167 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0130 11:56:51.219964 6167 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0130 11:56:51.221516 6167 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0130 11:56:51.221579 6167 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0130 11:56:51.221649 6167 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0130 11:56:51.221704 6167 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0130 11:56:51.221656 6167 handler.go:208] Removed *v1.Node event handler 2\\\\nI0130 11:56:51.221770 6167 handler.go:208] Removed *v1.Node event handler 7\\\\nI0130 11:56:51.221773 6167 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0130 11:56:51.221824 6167 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0130 11:56:51.221892 6167 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0130 11:56:51.221905 6167 factory.go:656] Stopping watch factory\\\\nI0130 11:56:51.221917 6167 handler.go:208] Removed *v1.EgressFirewall ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.056556 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.070313 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.085567 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:55 crc kubenswrapper[4703]: E0130 11:56:55.085682 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.087601 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33ae9b16-ffdb-4338-ba98-8da799fa7591\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb1699595c372168389cb480f8c41f41a23f856d321138a04599628f1d4e19cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdf1201e129d6158cd5ad6dfb3e93f5ec2a5e75c738edd2dc3bd197e813d6ac5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39eca81eb300b1b4d7a68731db92f76c91270b0bf49f7ae9bcf9643559bcb722\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-di
r\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.089200 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.089258 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.089271 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.089288 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.089330 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:55Z","lastTransitionTime":"2026-01-30T11:56:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.102044 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.114073 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.132383 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.149652 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.165222 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.180512 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.191224 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.191272 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.191308 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.191331 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.191347 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:55Z","lastTransitionTime":"2026-01-30T11:56:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.195579 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.206740 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 18:30:17.958476664 +0000 UTC Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.211532 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.230747 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.245440 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\"
:true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.259716 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-reg
eneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.276105 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.292259 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.298181 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.298244 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.298262 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.298286 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.298299 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:55Z","lastTransitionTime":"2026-01-30T11:56:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.315281 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:56:51Z\\\",\\\"message\\\":\\\"ng *v1.Pod event handler 6 for removal\\\\nI0130 11:56:51.219927 6167 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0130 11:56:51.219948 6167 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0130 11:56:51.219964 6167 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0130 11:56:51.221516 6167 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0130 11:56:51.221579 6167 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0130 11:56:51.221649 6167 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0130 11:56:51.221704 6167 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0130 11:56:51.221656 6167 handler.go:208] Removed *v1.Node event handler 2\\\\nI0130 11:56:51.221770 6167 handler.go:208] Removed *v1.Node event handler 7\\\\nI0130 11:56:51.221773 6167 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0130 11:56:51.221824 6167 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0130 11:56:51.221892 6167 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0130 11:56:51.221905 6167 factory.go:656] Stopping watch factory\\\\nI0130 11:56:51.221917 6167 handler.go:208] Removed *v1.EgressFirewall ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.326459 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.339167 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.352046 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.363079 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.374101 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33ae9b16-ffdb-4338-ba98-8da799fa7591\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb1699595c372168389cb480f8c41f41a23f856d321138a04599628f1d4e19cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdf1201e129d6158cd5ad6dfb3e93f5ec2a5e75c738edd2dc3bd197e813d6ac5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39eca81eb300b1b4d7a68731db92f76c91270b0bf49f7ae9bcf9643559bcb722\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.385362 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.395513 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.400845 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.400885 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.400895 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.400910 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.400919 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:55Z","lastTransitionTime":"2026-01-30T11:56:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.404722 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running
\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.415781 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.426883 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:56:55Z is after 2025-08-24T17:21:41Z" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.503764 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.503804 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.503813 4703 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.503830 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.503860 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:55Z","lastTransitionTime":"2026-01-30T11:56:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.607321 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.607622 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.607697 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.607771 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.607838 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:55Z","lastTransitionTime":"2026-01-30T11:56:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.710141 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.710758 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.710830 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.710898 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.710971 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:55Z","lastTransitionTime":"2026-01-30T11:56:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.814353 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.814418 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.814443 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.814476 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.814500 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:55Z","lastTransitionTime":"2026-01-30T11:56:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.874358 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs\") pod \"network-metrics-daemon-qrt92\" (UID: \"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\") " pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:56:55 crc kubenswrapper[4703]: E0130 11:56:55.874564 4703 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 11:56:55 crc kubenswrapper[4703]: E0130 11:56:55.874634 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs podName:ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd nodeName:}" failed. No retries permitted until 2026-01-30 11:57:03.874612255 +0000 UTC m=+59.652433919 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs") pod "network-metrics-daemon-qrt92" (UID: "ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.917422 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.917497 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.917515 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.917541 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:55 crc kubenswrapper[4703]: I0130 11:56:55.917561 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:55Z","lastTransitionTime":"2026-01-30T11:56:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.019728 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.019823 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.019841 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.019867 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.019885 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:56Z","lastTransitionTime":"2026-01-30T11:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.085755 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.085841 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:56 crc kubenswrapper[4703]: E0130 11:56:56.085899 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:56:56 crc kubenswrapper[4703]: E0130 11:56:56.086009 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.086536 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:56:56 crc kubenswrapper[4703]: E0130 11:56:56.086747 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.122439 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.122484 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.122492 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.122507 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.122518 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:56Z","lastTransitionTime":"2026-01-30T11:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.207585 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 02:10:29.380795953 +0000 UTC Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.224716 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.224838 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.224849 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.224865 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.224876 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:56Z","lastTransitionTime":"2026-01-30T11:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.328271 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.328359 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.328374 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.328392 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.328416 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:56Z","lastTransitionTime":"2026-01-30T11:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.430432 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.430476 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.430487 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.430504 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.430518 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:56Z","lastTransitionTime":"2026-01-30T11:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.533785 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.533841 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.533857 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.533881 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.533898 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:56Z","lastTransitionTime":"2026-01-30T11:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.636859 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.636964 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.636989 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.637022 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.637046 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:56Z","lastTransitionTime":"2026-01-30T11:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.740776 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.740846 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.740864 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.740892 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.740910 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:56Z","lastTransitionTime":"2026-01-30T11:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.844326 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.844376 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.844398 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.844424 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.844443 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:56Z","lastTransitionTime":"2026-01-30T11:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.885626 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.885884 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.885949 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:56 crc kubenswrapper[4703]: E0130 11:56:56.886205 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:57:28.886175659 +0000 UTC m=+84.663997313 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:56:56 crc kubenswrapper[4703]: E0130 11:56:56.886272 4703 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 11:56:56 crc kubenswrapper[4703]: E0130 11:56:56.886315 4703 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 11:56:56 crc kubenswrapper[4703]: E0130 11:56:56.886392 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-30 11:57:28.886364654 +0000 UTC m=+84.664186348 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 30 11:56:56 crc kubenswrapper[4703]: E0130 11:56:56.886420 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-30 11:57:28.886408045 +0000 UTC m=+84.664229739 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.948261 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.948330 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.948351 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.948418 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.948444 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:56Z","lastTransitionTime":"2026-01-30T11:56:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.987685 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:56 crc kubenswrapper[4703]: I0130 11:56:56.987737 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:56 crc kubenswrapper[4703]: E0130 11:56:56.987961 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 11:56:56 crc kubenswrapper[4703]: E0130 11:56:56.987982 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 11:56:56 crc kubenswrapper[4703]: E0130 11:56:56.987996 4703 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:56 crc kubenswrapper[4703]: E0130 11:56:56.988387 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 30 11:56:56 crc kubenswrapper[4703]: E0130 11:56:56.988438 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 30 11:56:56 crc kubenswrapper[4703]: E0130 11:56:56.988457 4703 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:56 crc kubenswrapper[4703]: E0130 11:56:56.988510 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-30 11:57:28.988485949 +0000 UTC m=+84.766307603 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:56 crc kubenswrapper[4703]: E0130 11:56:56.988545 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-30 11:57:28.98852968 +0000 UTC m=+84.766351334 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.051639 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.051714 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.051730 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.051749 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.051759 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:57Z","lastTransitionTime":"2026-01-30T11:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.085980 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:56:57 crc kubenswrapper[4703]: E0130 11:56:57.086110 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.154208 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.154249 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.154260 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.154274 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.154283 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:57Z","lastTransitionTime":"2026-01-30T11:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.208388 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 07:20:17.347056479 +0000 UTC Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.257731 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.257773 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.257784 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.257803 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.257816 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:57Z","lastTransitionTime":"2026-01-30T11:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.361207 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.361291 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.361317 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.361350 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.361376 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:57Z","lastTransitionTime":"2026-01-30T11:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.463962 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.464013 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.464027 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.464046 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.464058 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:57Z","lastTransitionTime":"2026-01-30T11:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.566888 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.566948 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.566965 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.566992 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.567011 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:57Z","lastTransitionTime":"2026-01-30T11:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.670295 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.670365 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.670381 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.670419 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.670437 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:57Z","lastTransitionTime":"2026-01-30T11:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.773730 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.773800 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.773821 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.773849 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.773868 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:57Z","lastTransitionTime":"2026-01-30T11:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.876647 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.876734 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.876747 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.876764 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.876776 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:57Z","lastTransitionTime":"2026-01-30T11:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.978750 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.978806 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.978821 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.978845 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:57 crc kubenswrapper[4703]: I0130 11:56:57.978860 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:57Z","lastTransitionTime":"2026-01-30T11:56:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.081502 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.081541 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.081550 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.081566 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.081576 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:58Z","lastTransitionTime":"2026-01-30T11:56:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.085969 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.086009 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:56:58 crc kubenswrapper[4703]: E0130 11:56:58.086087 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.085970 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:56:58 crc kubenswrapper[4703]: E0130 11:56:58.086269 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:56:58 crc kubenswrapper[4703]: E0130 11:56:58.086462 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.184447 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.184493 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.184504 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.184521 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.184534 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:58Z","lastTransitionTime":"2026-01-30T11:56:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.209059 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 09:09:54.21951805 +0000 UTC Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.286573 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.286655 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.286681 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.286719 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.286743 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:58Z","lastTransitionTime":"2026-01-30T11:56:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.389425 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.389524 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.389551 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.389597 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.389623 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:58Z","lastTransitionTime":"2026-01-30T11:56:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.493426 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.493493 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.493510 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.493535 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:56:58 crc kubenswrapper[4703]: I0130 11:56:58.493552 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:56:58Z","lastTransitionTime":"2026-01-30T11:56:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[the five-entry heartbeat block above (NodeHasSufficientMemory / NodeHasNoDiskPressure / NodeHasSufficientPID / NodeNotReady / "Node became not ready") repeats verbatim, timestamps advancing roughly every 100 ms, at 11:56:58.596, 11:56:58.697, 11:56:58.800, 11:56:58.904, and 11:56:59.007]
Jan 30 11:56:59 crc kubenswrapper[4703]: I0130 11:56:59.085659 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:56:59 crc kubenswrapper[4703]: E0130 11:56:59.085876 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
[heartbeat block repeats at 11:56:59.109]
Jan 30 11:56:59 crc kubenswrapper[4703]: I0130 11:56:59.210291 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 23:20:13.253762305 +0000 UTC
[heartbeat block repeats at 11:56:59.212]
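The condition={...} payload in the setters.go entries above is a core/v1 NodeCondition rendered as JSON. A minimal, stdlib-only sketch that reproduces the same shape; the local struct is an illustrative stand-in for the upstream type, not the kubelet's own code, and the message text is copied from the log:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// nodeCondition mirrors the fields of the upstream v1.NodeCondition
// that appear in the "Node became not ready" entries.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	now := time.Now().UTC().Format(time.RFC3339)
	cond := nodeCondition{
		Type:               "Ready",
		Status:             "False",
		LastHeartbeatTime:  now,
		LastTransitionTime: now,
		Reason:             "KubeletNotReady",
		Message: "container runtime network not ready: NetworkReady=false " +
			"reason:NetworkPluginNotReady message:Network plugin returns error: " +
			"no CNI configuration file in /etc/kubernetes/cni/net.d/. " +
			"Has your network provider started?",
	}
	out, _ := json.Marshal(cond)
	// Prints the same shape as the condition={...} field logged by setters.go:603.
	fmt.Println(string(out))
}
```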
[heartbeat block repeats at 11:56:59.315, 11:56:59.418, 11:56:59.521, 11:56:59.627, 11:56:59.730, 11:56:59.833, 11:56:59.939, and 11:57:00.042]
Jan 30 11:57:00 crc kubenswrapper[4703]: I0130 11:57:00.085706 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:57:00 crc kubenswrapper[4703]: I0130 11:57:00.085791 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:57:00 crc kubenswrapper[4703]: I0130 11:57:00.085845 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:57:00 crc kubenswrapper[4703]: E0130 11:57:00.086066 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:57:00 crc kubenswrapper[4703]: E0130 11:57:00.086183 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:57:00 crc kubenswrapper[4703]: E0130 11:57:00.086339 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
[heartbeat block repeats at 11:57:00.145]
Jan 30 11:57:00 crc kubenswrapper[4703]: I0130 11:57:00.210876 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 12:14:30.520916613 +0000 UTC
[heartbeat block repeats at 11:57:00.248, 11:57:00.352, 11:57:00.455, 11:57:00.557, 11:57:00.660, 11:57:00.764, 11:57:00.867, 11:57:00.969, and 11:57:01.072]
Jan 30 11:57:01 crc kubenswrapper[4703]: I0130 11:57:01.086165 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:57:01 crc kubenswrapper[4703]: E0130 11:57:01.086316 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
[heartbeat block repeats at 11:57:01.175]
Jan 30 11:57:01 crc kubenswrapper[4703]: I0130 11:57:01.212085 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 16:23:09.180644816 +0000 UTC
[heartbeat block repeats at 11:57:01.278 and 11:57:01.381]
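Note that the certificate_manager.go entries report the same expiration (2026-02-24 05:53:03 UTC) but a different rotation deadline on each pass (2025-12-10, 2025-12-08, 2025-12-14, and 2025-12-22 below), and every computed deadline is already in the past relative to the log time, which would explain why the manager keeps re-evaluating. This is consistent with client-go recomputing a jittered deadline, commonly in roughly the 70–90% band of the certificate's lifetime, each time it checks. A sketch under that assumption; only the expiration date comes from the log, and the issue time (a one-year lifetime) is assumed:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextRotationDeadline approximates the jittered-deadline idea: pick a
// fresh point uniformly in the 70-90% band of the cert's validity window.
// This mimics, but does not copy, the client-go certificate manager.
func nextRotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.3*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	notBefore := time.Date(2025, 2, 24, 5, 53, 3, 0, time.UTC) // assumed issue time
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)  // expiration from the log
	for i := 0; i < 4; i++ {
		// Each call yields a different deadline in late 2025 / early 2026,
		// matching the spread of deadlines in the certificate_manager.go lines.
		fmt.Println(nextRotationDeadline(notBefore, notAfter))
	}
}
```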
[heartbeat block repeats at 11:57:01.484, 11:57:01.587, 11:57:01.690, 11:57:01.794, 11:57:01.897, and 11:57:02.000]
Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.085808 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.085894 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.085809 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:57:02 crc kubenswrapper[4703]: E0130 11:57:02.085945 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:57:02 crc kubenswrapper[4703]: E0130 11:57:02.086036 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:57:02 crc kubenswrapper[4703]: E0130 11:57:02.086207 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
[heartbeat block repeats at 11:57:02.104 and 11:57:02.206]
Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.213242 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 19:13:42.647123436 +0000 UTC
[heartbeat block repeats at 11:57:02.310]
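The two "Error updating node status, will retry" failures that follow are not network-plugin problems at all: the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24T17:21:41Z, while the node clock reads 2026-01-30T11:57:02Z, so TLS verification fails before the status patch is ever applied. A stdlib-style sketch of the validity-window test behind the "certificate has expired or is not yet valid" error; the NotBefore value is assumed, since the log only shows the expiry:

```go
package main

import (
	"fmt"
	"time"
)

// checkValidity reproduces the window test crypto/x509 applies during
// verification: reject a certificate used before NotBefore or after NotAfter.
func checkValidity(now, notBefore, notAfter time.Time) error {
	if now.Before(notBefore) {
		return fmt.Errorf("certificate is not yet valid: current time %s is before %s",
			now.Format(time.RFC3339), notBefore.Format(time.RFC3339))
	}
	if now.After(notAfter) {
		return fmt.Errorf("certificate has expired: current time %s is after %s",
			now.Format(time.RFC3339), notAfter.Format(time.RFC3339))
	}
	return nil
}

func main() {
	now := time.Date(2026, 1, 30, 11, 57, 2, 0, time.UTC)     // log time
	notAfter := time.Date(2025, 8, 24, 17, 21, 41, 0, time.UTC) // expiry from the webhook error
	notBefore := notAfter.AddDate(-1, 0, 0)                     // issue time not in the log; assumed
	fmt.Println(checkValidity(now, notBefore, notAfter)) // expired: 2026-01-30 is after 2025-08-24
}
```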
[heartbeat block repeats at 11:57:02.414, 11:57:02.518, 11:57:02.621, and 11:57:02.724]
Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.789058 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.789275 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.789300 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.789378 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.789396 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:02Z","lastTransitionTime":"2026-01-30T11:57:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:02 crc kubenswrapper[4703]: E0130 11:57:02.805880 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:02Z is after 
2025-08-24T17:21:41Z" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.810427 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.810480 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.810493 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.810514 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.810526 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:02Z","lastTransitionTime":"2026-01-30T11:57:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:02 crc kubenswrapper[4703]: E0130 11:57:02.833248 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:02Z is after 
2025-08-24T17:21:41Z" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.839509 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.839570 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.839585 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.839608 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.839627 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:02Z","lastTransitionTime":"2026-01-30T11:57:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:02 crc kubenswrapper[4703]: E0130 11:57:02.863304 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:02Z is after 
2025-08-24T17:21:41Z" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.868906 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.868966 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.868986 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.869013 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.869035 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:02Z","lastTransitionTime":"2026-01-30T11:57:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:02 crc kubenswrapper[4703]: E0130 11:57:02.893907 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:02Z is after 
2025-08-24T17:21:41Z" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.898300 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.898358 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.898378 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.898417 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.898453 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:02Z","lastTransitionTime":"2026-01-30T11:57:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:02 crc kubenswrapper[4703]: E0130 11:57:02.919898 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:02Z is after 
2025-08-24T17:21:41Z" Jan 30 11:57:02 crc kubenswrapper[4703]: E0130 11:57:02.920133 4703 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.922572 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.922645 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.922681 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.922714 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:02 crc kubenswrapper[4703]: I0130 11:57:02.922737 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:02Z","lastTransitionTime":"2026-01-30T11:57:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.026311 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.026355 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.026367 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.026393 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.026406 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:03Z","lastTransitionTime":"2026-01-30T11:57:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.086174 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:03 crc kubenswrapper[4703]: E0130 11:57:03.086325 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.128402 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.128472 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.128494 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.128534 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.128566 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:03Z","lastTransitionTime":"2026-01-30T11:57:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.214193 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 11:12:33.291444591 +0000 UTC Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.231858 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.232308 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.232619 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.232920 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.233144 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:03Z","lastTransitionTime":"2026-01-30T11:57:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.335271 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.335343 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.335359 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.335374 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.335383 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:03Z","lastTransitionTime":"2026-01-30T11:57:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.438500 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.438569 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.438595 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.438625 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.438651 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:03Z","lastTransitionTime":"2026-01-30T11:57:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.540608 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.540963 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.541109 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.541298 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.541388 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:03Z","lastTransitionTime":"2026-01-30T11:57:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.644331 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.644374 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.644385 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.644401 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.644412 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:03Z","lastTransitionTime":"2026-01-30T11:57:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.746477 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.746556 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.746569 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.746585 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.746598 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:03Z","lastTransitionTime":"2026-01-30T11:57:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.849829 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.849870 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.849879 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.849893 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.849902 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:03Z","lastTransitionTime":"2026-01-30T11:57:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.956317 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.956440 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.956485 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.956520 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.956543 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:03Z","lastTransitionTime":"2026-01-30T11:57:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:03 crc kubenswrapper[4703]: I0130 11:57:03.967103 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs\") pod \"network-metrics-daemon-qrt92\" (UID: \"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\") " pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:03 crc kubenswrapper[4703]: E0130 11:57:03.967337 4703 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 11:57:03 crc kubenswrapper[4703]: E0130 11:57:03.967443 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs podName:ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd nodeName:}" failed. No retries permitted until 2026-01-30 11:57:19.967406205 +0000 UTC m=+75.745227899 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs") pod "network-metrics-daemon-qrt92" (UID: "ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.059597 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.059669 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.059679 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.059696 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.059706 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:04Z","lastTransitionTime":"2026-01-30T11:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.085313 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.085348 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.085358 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:04 crc kubenswrapper[4703]: E0130 11:57:04.085469 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:57:04 crc kubenswrapper[4703]: E0130 11:57:04.085612 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:57:04 crc kubenswrapper[4703]: E0130 11:57:04.085776 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.163058 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.163153 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.163171 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.163193 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.163208 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:04Z","lastTransitionTime":"2026-01-30T11:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.214842 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 14:53:33.149045722 +0000 UTC Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.265930 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.265967 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.265975 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.265988 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.265998 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:04Z","lastTransitionTime":"2026-01-30T11:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.368319 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.368368 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.368379 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.368396 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.368407 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:04Z","lastTransitionTime":"2026-01-30T11:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.471240 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.471274 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.471283 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.471298 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.471310 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:04Z","lastTransitionTime":"2026-01-30T11:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.574207 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.574280 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.574298 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.574327 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.574345 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:04Z","lastTransitionTime":"2026-01-30T11:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.677237 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.677283 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.677334 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.677353 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.677365 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:04Z","lastTransitionTime":"2026-01-30T11:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.780016 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.780070 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.780085 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.780107 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.780158 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:04Z","lastTransitionTime":"2026-01-30T11:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.882295 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.882346 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.882360 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.882382 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.882397 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:04Z","lastTransitionTime":"2026-01-30T11:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.985441 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.985493 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.985504 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.985524 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:04 crc kubenswrapper[4703]: I0130 11:57:04.985536 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:04Z","lastTransitionTime":"2026-01-30T11:57:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.086404 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:05 crc kubenswrapper[4703]: E0130 11:57:05.086539 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.088079 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.088244 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.088356 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.088447 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.088534 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:05Z","lastTransitionTime":"2026-01-30T11:57:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.103157 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:05Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.122027 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:05Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.137294 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:05Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.148614 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:05Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.165857 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33ae9b16-ffdb-4338-ba98-8da799fa7591\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb1699595c372168389cb480f8c41f41a23f856d321138a04599628f1d4e19cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdf1201e129d6158cd5ad6dfb3e93f5ec2a5e75c738edd2dc3bd197e813d6ac5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39eca81eb300b1b4d7a68731db92f76c91270b0bf49f7ae9bcf9643559bcb722\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:05Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.182812 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:05Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.190190 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.190226 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.190237 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.190255 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.190266 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:05Z","lastTransitionTime":"2026-01-30T11:57:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.197214 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:05Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.210177 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:05Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.215789 4703 certificate_manager.go:356] 
kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 16:04:33.353450056 +0000 UTC Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.225203 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:05Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.237396 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:05Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.248451 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:05Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.260525 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:05Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.272679 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\"
:true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:05Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.291031 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-reg
eneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:05Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.293099 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.293161 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.293177 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.293196 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.293209 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:05Z","lastTransitionTime":"2026-01-30T11:57:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.302391 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/cr
cont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:05Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.319692 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b91afa57ecf477f01f8341fb208ad46662c6e374
aebd3a129031b3a6e17a0468\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:56:51Z\\\",\\\"message\\\":\\\"ng *v1.Pod event handler 6 for removal\\\\nI0130 11:56:51.219927 6167 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0130 11:56:51.219948 6167 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0130 11:56:51.219964 6167 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0130 11:56:51.221516 6167 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0130 11:56:51.221579 6167 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0130 11:56:51.221649 6167 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0130 11:56:51.221704 6167 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0130 11:56:51.221656 6167 handler.go:208] Removed *v1.Node event handler 2\\\\nI0130 11:56:51.221770 6167 handler.go:208] Removed *v1.Node event handler 7\\\\nI0130 11:56:51.221773 6167 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0130 11:56:51.221824 6167 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0130 11:56:51.221892 6167 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0130 11:56:51.221905 6167 factory.go:656] Stopping watch factory\\\\nI0130 11:56:51.221917 6167 handler.go:208] Removed *v1.EgressFirewall ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:05Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.330183 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:05Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.395607 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.395665 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.395688 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.395719 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.395740 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:05Z","lastTransitionTime":"2026-01-30T11:57:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.498538 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.498598 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.498614 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.498635 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.498650 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:05Z","lastTransitionTime":"2026-01-30T11:57:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.601692 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.601735 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.601747 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.601769 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.601782 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:05Z","lastTransitionTime":"2026-01-30T11:57:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.704770 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.704870 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.704896 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.704948 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.704968 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:05Z","lastTransitionTime":"2026-01-30T11:57:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.808377 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.808464 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.808474 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.808524 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.808537 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:05Z","lastTransitionTime":"2026-01-30T11:57:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.912271 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.912319 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.912335 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.912362 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:05 crc kubenswrapper[4703]: I0130 11:57:05.912380 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:05Z","lastTransitionTime":"2026-01-30T11:57:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.014473 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.014533 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.014544 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.014562 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.014602 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:06Z","lastTransitionTime":"2026-01-30T11:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.085402 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.085456 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.085546 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:06 crc kubenswrapper[4703]: E0130 11:57:06.085539 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:06 crc kubenswrapper[4703]: E0130 11:57:06.085653 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:57:06 crc kubenswrapper[4703]: E0130 11:57:06.086075 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.116786 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.116835 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.116847 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.116864 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.116877 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:06Z","lastTransitionTime":"2026-01-30T11:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.216209 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 19:02:29.177964708 +0000 UTC Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.219426 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.219491 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.219505 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.219522 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.219532 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:06Z","lastTransitionTime":"2026-01-30T11:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.322416 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.322462 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.322475 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.322494 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.322508 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:06Z","lastTransitionTime":"2026-01-30T11:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.425751 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.425823 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.425840 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.425868 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.425885 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:06Z","lastTransitionTime":"2026-01-30T11:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.529481 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.529549 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.529578 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.529609 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.529632 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:06Z","lastTransitionTime":"2026-01-30T11:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.632086 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.632190 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.632208 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.632234 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.632255 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:06Z","lastTransitionTime":"2026-01-30T11:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.735340 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.735374 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.735384 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.735399 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.735409 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:06Z","lastTransitionTime":"2026-01-30T11:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.838289 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.838338 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.838349 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.838365 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.838374 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:06Z","lastTransitionTime":"2026-01-30T11:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.941677 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.941711 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.941720 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.941735 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:06 crc kubenswrapper[4703]: I0130 11:57:06.941743 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:06Z","lastTransitionTime":"2026-01-30T11:57:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.044145 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.044183 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.044193 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.044210 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.044221 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:07Z","lastTransitionTime":"2026-01-30T11:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.086041 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:07 crc kubenswrapper[4703]: E0130 11:57:07.086288 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.146963 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.147028 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.147042 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.147059 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.147070 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:07Z","lastTransitionTime":"2026-01-30T11:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.216982 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 03:30:28.526322467 +0000 UTC Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.249775 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.249823 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.249836 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.249854 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.249865 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:07Z","lastTransitionTime":"2026-01-30T11:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.352396 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.352456 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.352477 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.352503 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.352520 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:07Z","lastTransitionTime":"2026-01-30T11:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.454819 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.454859 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.454868 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.454883 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.454892 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:07Z","lastTransitionTime":"2026-01-30T11:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.557413 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.557470 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.557487 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.557512 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.557529 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:07Z","lastTransitionTime":"2026-01-30T11:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.660191 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.660240 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.660251 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.660272 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.660282 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:07Z","lastTransitionTime":"2026-01-30T11:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.762404 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.762464 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.762483 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.762505 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.762522 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:07Z","lastTransitionTime":"2026-01-30T11:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.864962 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.864995 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.865003 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.865344 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.865369 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:07Z","lastTransitionTime":"2026-01-30T11:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.967921 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.968146 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.968156 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.968170 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:07 crc kubenswrapper[4703]: I0130 11:57:07.968179 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:07Z","lastTransitionTime":"2026-01-30T11:57:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.070707 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.070753 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.070762 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.070777 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.070787 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:08Z","lastTransitionTime":"2026-01-30T11:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.085338 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.085377 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:08 crc kubenswrapper[4703]: E0130 11:57:08.085475 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.085556 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:08 crc kubenswrapper[4703]: E0130 11:57:08.085828 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:08 crc kubenswrapper[4703]: E0130 11:57:08.085958 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.086217 4703 scope.go:117] "RemoveContainer" containerID="b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.173656 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.173720 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.173737 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.173756 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.173768 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:08Z","lastTransitionTime":"2026-01-30T11:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.218029 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 08:36:31.316497772 +0000 UTC Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.277388 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.277430 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.277441 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.277460 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.277475 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:08Z","lastTransitionTime":"2026-01-30T11:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.380103 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.380161 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.380173 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.380190 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.380200 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:08Z","lastTransitionTime":"2026-01-30T11:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.482581 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.482624 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.482633 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.482649 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.482658 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:08Z","lastTransitionTime":"2026-01-30T11:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.585576 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.585651 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.585676 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.585707 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.585732 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:08Z","lastTransitionTime":"2026-01-30T11:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.688236 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.688310 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.688322 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.688341 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.688353 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:08Z","lastTransitionTime":"2026-01-30T11:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.790974 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.791009 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.791021 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.791040 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.791051 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:08Z","lastTransitionTime":"2026-01-30T11:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.871573 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovnkube-controller/1.log" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.873681 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerStarted","Data":"13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53"} Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.874145 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.886239 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:08Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.893786 4703 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.893829 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.893839 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.893857 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.893868 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:08Z","lastTransitionTime":"2026-01-30T11:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.900785 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-va
r-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:08Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.912844 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:08Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.925399 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33ae9b16-ffdb-4338-ba98-8da799fa7591\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb1699595c372168389cb480f8c41f41a23f856d321138a04599628f1d4e19cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdf1201e129d6158cd5ad6dfb3e93f5ec2a5e75c738edd2dc3bd197e813d6ac5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39eca81eb300b1b4d7a68731db92f76c91270b0bf49f7ae9bcf9643559bcb722\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:08Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.937328 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:08Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.950320 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:08Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.964223 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:08Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.978715 4703 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:08Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.997065 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.997096 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.997106 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.997145 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.997157 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:08Z","lastTransitionTime":"2026-01-30T11:57:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:08 crc kubenswrapper[4703]: I0130 11:57:08.999660 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:08Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.014044 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.077992 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.086254 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:09 crc kubenswrapper[4703]: E0130 11:57:09.086390 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.099802 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.099804 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run
/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.099838 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.099981 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.100021 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.100046 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:09Z","lastTransitionTime":"2026-01-30T11:57:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.120405 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.136016 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.148440 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.167427 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13604acff234530c9f9c840854b0561cc60cf1ce
5f1fb76c1392210609442c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:56:51Z\\\",\\\"message\\\":\\\"ng *v1.Pod event handler 6 for removal\\\\nI0130 11:56:51.219927 6167 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0130 11:56:51.219948 6167 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0130 11:56:51.219964 6167 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0130 11:56:51.221516 6167 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0130 11:56:51.221579 6167 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0130 11:56:51.221649 6167 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0130 11:56:51.221704 6167 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0130 11:56:51.221656 6167 handler.go:208] Removed *v1.Node event handler 2\\\\nI0130 11:56:51.221770 6167 handler.go:208] Removed *v1.Node event handler 7\\\\nI0130 11:56:51.221773 6167 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0130 11:56:51.221824 6167 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0130 11:56:51.221892 6167 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0130 11:56:51.221905 6167 factory.go:656] Stopping watch factory\\\\nI0130 11:56:51.221917 6167 handler.go:208] Removed *v1.EgressFirewall 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:57:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.176694 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.202180 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.202224 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.202236 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.202253 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.202267 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:09Z","lastTransitionTime":"2026-01-30T11:57:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.218397 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 20:37:41.109264219 +0000 UTC Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.304680 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.304716 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.304725 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.304741 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.304751 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:09Z","lastTransitionTime":"2026-01-30T11:57:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.407410 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.407482 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.407519 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.407537 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.407547 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:09Z","lastTransitionTime":"2026-01-30T11:57:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.509521 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.509830 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.509842 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.509858 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.509868 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:09Z","lastTransitionTime":"2026-01-30T11:57:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.614275 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.614312 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.614325 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.614342 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.614353 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:09Z","lastTransitionTime":"2026-01-30T11:57:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.716618 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.716657 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.716669 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.716686 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.716698 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:09Z","lastTransitionTime":"2026-01-30T11:57:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.819201 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.819237 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.819247 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.819263 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.819273 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:09Z","lastTransitionTime":"2026-01-30T11:57:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.925585 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.925618 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.925637 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.925656 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.925667 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:09Z","lastTransitionTime":"2026-01-30T11:57:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.927110 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovnkube-controller/2.log" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.927657 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovnkube-controller/1.log" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.930099 4703 generic.go:334] "Generic (PLEG): container finished" podID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerID="13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53" exitCode=1 Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.930162 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerDied","Data":"13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53"} Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.930230 4703 scope.go:117] "RemoveContainer" containerID="b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.930729 4703 scope.go:117] "RemoveContainer" containerID="13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53" Jan 30 11:57:09 crc kubenswrapper[4703]: E0130 11:57:09.930860 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.942932 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.956076 4703 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.970167 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.983044 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:09 crc kubenswrapper[4703]: I0130 11:57:09.998468 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.008976 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:10Z is after 2025-08-24T17:21:41Z" Jan 30 
11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.022534 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:10Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.027819 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.027857 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.027865 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.027891 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.027920 4703 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:10Z","lastTransitionTime":"2026-01-30T11:57:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.038342 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:10Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.050466 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:10Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.063790 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:10Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.084194 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13604acff234530c9f9c840854b0561cc60cf1ce
5f1fb76c1392210609442c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b91afa57ecf477f01f8341fb208ad46662c6e374aebd3a129031b3a6e17a0468\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:56:51Z\\\",\\\"message\\\":\\\"ng *v1.Pod event handler 6 for removal\\\\nI0130 11:56:51.219927 6167 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0130 11:56:51.219948 6167 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0130 11:56:51.219964 6167 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0130 11:56:51.221516 6167 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0130 11:56:51.221579 6167 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0130 11:56:51.221649 6167 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0130 11:56:51.221704 6167 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0130 11:56:51.221656 6167 handler.go:208] Removed *v1.Node event handler 2\\\\nI0130 11:56:51.221770 6167 handler.go:208] Removed *v1.Node event handler 7\\\\nI0130 11:56:51.221773 6167 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0130 11:56:51.221824 6167 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0130 11:56:51.221892 6167 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0130 11:56:51.221905 6167 factory.go:656] Stopping watch factory\\\\nI0130 11:56:51.221917 6167 handler.go:208] Removed *v1.EgressFirewall ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:09Z\\\",\\\"message\\\":\\\"try.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-l8kf2\\\\nI0130 11:57:09.853692 6396 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-cx2rm in node crc\\\\nF0130 11:57:09.853684 6396 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z]\\\\nI0130 11:57:09.853699 6396 obj_retry.go:386] Retry successful for *v1.Pod 
openshift-machine-config-operator/machine-config-daemon-cx2rm after 0 failed attempt(s)\\\\nI0130 11:57:09.853704 6396 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/mac\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:57:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"
initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:10Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.085529 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:10 crc kubenswrapper[4703]: E0130 11:57:10.085652 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.085713 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:10 crc kubenswrapper[4703]: E0130 11:57:10.085828 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.085886 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:10 crc kubenswrapper[4703]: E0130 11:57:10.085956 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.095093 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:10Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.109260 4703 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-72zlj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:10Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.122998 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:10Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.131164 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.131239 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.131258 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.131275 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.131288 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:10Z","lastTransitionTime":"2026-01-30T11:57:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.135518 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33ae9b16-ffdb-4338-ba98-8da799fa7591\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb1699595c372168389cb480f8c41f41a23f856d321138a04599628f1d4e19cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdf1201e129d6158cd5ad6dfb3e93f5ec2a5e75c738edd2dc3bd197e813d6ac5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39eca81eb300b1b4d7a68731db92f76c91270b0bf49f7ae9bcf9643559bcb722\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:10Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.150286 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container 
could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:10Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.162424 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired 
or is not yet valid: current time 2026-01-30T11:57:10Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.218833 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 04:47:41.818280715 +0000 UTC Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.235166 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.235262 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.235275 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.235292 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.235302 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:10Z","lastTransitionTime":"2026-01-30T11:57:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.337880 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.337943 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.337954 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.337977 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.337996 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:10Z","lastTransitionTime":"2026-01-30T11:57:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.440259 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.440299 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.440311 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.440329 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.440340 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:10Z","lastTransitionTime":"2026-01-30T11:57:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.542358 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.542390 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.542399 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.542414 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.542424 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:10Z","lastTransitionTime":"2026-01-30T11:57:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.645776 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.645842 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.645853 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.645872 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.645884 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:10Z","lastTransitionTime":"2026-01-30T11:57:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.749004 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.749047 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.749057 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.749073 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.749082 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:10Z","lastTransitionTime":"2026-01-30T11:57:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.851861 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.851925 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.851945 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.851971 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.851988 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:10Z","lastTransitionTime":"2026-01-30T11:57:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.935576 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovnkube-controller/2.log" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.939509 4703 scope.go:117] "RemoveContainer" containerID="13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53" Jan 30 11:57:10 crc kubenswrapper[4703]: E0130 11:57:10.939930 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.954208 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.954255 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.954273 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.954291 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.954302 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:10Z","lastTransitionTime":"2026-01-30T11:57:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.959982 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:10Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.974696 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:10Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:10 crc kubenswrapper[4703]: I0130 11:57:10.989776 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:10Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.003614 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:11Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.021566 4703 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:11Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.038156 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-
resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:11Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.051580 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:11Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.065671 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:11Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.079697 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:11Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.085589 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:11 crc kubenswrapper[4703]: E0130 11:57:11.085758 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.087085 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.087113 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.087165 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.087181 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.087192 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:11Z","lastTransitionTime":"2026-01-30T11:57:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.097077 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:11Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.118769 4703 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:09Z\\\",\\\"message\\\":\\\"try.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-l8kf2\\\\nI0130 11:57:09.853692 6396 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-cx2rm in node crc\\\\nF0130 11:57:09.853684 6396 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z]\\\\nI0130 11:57:09.853699 6396 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-cx2rm after 0 failed attempt(s)\\\\nI0130 11:57:09.853704 6396 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/mac\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:57:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:11Z is after 2025-08-24T17:21:41Z"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.130086 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:11Z is after 2025-08-24T17:21:41Z"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.140646 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:11Z is after 2025-08-24T17:21:41Z"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.152150 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33ae9b16-ffdb-4338-ba98-8da799fa7591\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb1699595c372168389cb480f8c41f41a23f856d321138a04599628f1d4e19cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdf1201e129d6158cd5ad6dfb3e93f5ec2a5e75c738edd2dc3bd197e813d6ac5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39eca81eb300b1b4d7a68731db92f76c91270b0bf49f7ae9bcf9643559bcb722\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:11Z is after 2025-08-24T17:21:41Z"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.167034 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:11Z is after 2025-08-24T17:21:41Z"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.178141 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:11Z is after 2025-08-24T17:21:41Z"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.189617 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.189658 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.189666 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.189686 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.189697 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:11Z","lastTransitionTime":"2026-01-30T11:57:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.194100 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:11Z is after 2025-08-24T17:21:41Z"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.219318 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 22:19:28.263554689 +0000 UTC
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.292119 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.292174 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.292182 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.292214 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.292225 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:11Z","lastTransitionTime":"2026-01-30T11:57:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.394992 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.395061 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.395081 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.395111 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.395257 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:11Z","lastTransitionTime":"2026-01-30T11:57:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.497675 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.497713 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.497722 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.497739 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.497748 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:11Z","lastTransitionTime":"2026-01-30T11:57:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.600872 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.600927 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.600945 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.600973 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.600991 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:11Z","lastTransitionTime":"2026-01-30T11:57:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.717657 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.717715 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.717723 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.717740 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.717751 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:11Z","lastTransitionTime":"2026-01-30T11:57:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.820139 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.820190 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.820206 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.820223 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.820235 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:11Z","lastTransitionTime":"2026-01-30T11:57:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.922740 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.922812 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.922824 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.922842 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:11 crc kubenswrapper[4703]: I0130 11:57:11.922853 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:11Z","lastTransitionTime":"2026-01-30T11:57:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.025603 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.025666 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.025687 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.025717 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.025735 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:12Z","lastTransitionTime":"2026-01-30T11:57:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.085564 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.085595 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.085580 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:57:12 crc kubenswrapper[4703]: E0130 11:57:12.085742 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:57:12 crc kubenswrapper[4703]: E0130 11:57:12.085893 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:57:12 crc kubenswrapper[4703]: E0130 11:57:12.085935 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.128357 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.128410 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.128418 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.128435 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.128444 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:12Z","lastTransitionTime":"2026-01-30T11:57:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.219732 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 00:52:48.135199288 +0000 UTC
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.231298 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.231346 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.231355 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.231370 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.231381 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:12Z","lastTransitionTime":"2026-01-30T11:57:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.333743 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.333788 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.333796 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.333812 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.333821 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:12Z","lastTransitionTime":"2026-01-30T11:57:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.436516 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.436554 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.436564 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.436581 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.436591 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:12Z","lastTransitionTime":"2026-01-30T11:57:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.540403 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.540491 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.540507 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.540527 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.540540 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:12Z","lastTransitionTime":"2026-01-30T11:57:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.657529 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.657561 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.657573 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.657591 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.657605 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:12Z","lastTransitionTime":"2026-01-30T11:57:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.760418 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.760475 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.760487 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.760505 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.760516 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:12Z","lastTransitionTime":"2026-01-30T11:57:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.866405 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.866454 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.866473 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.866490 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.866499 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:12Z","lastTransitionTime":"2026-01-30T11:57:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.969460 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.969501 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.969510 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.969526 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:12 crc kubenswrapper[4703]: I0130 11:57:12.969536 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:12Z","lastTransitionTime":"2026-01-30T11:57:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.071742 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.071788 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.071806 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.071825 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.071835 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:13Z","lastTransitionTime":"2026-01-30T11:57:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.086173 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:57:13 crc kubenswrapper[4703]: E0130 11:57:13.086333 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.174397 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.174449 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.174461 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.174479 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.174490 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:13Z","lastTransitionTime":"2026-01-30T11:57:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.220585 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 08:12:28.776441987 +0000 UTC
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.277210 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.277254 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.277263 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.277289 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.277310 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:13Z","lastTransitionTime":"2026-01-30T11:57:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.295721 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.295764 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.295778 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.295794 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.295803 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:13Z","lastTransitionTime":"2026-01-30T11:57:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:13 crc kubenswrapper[4703]: E0130 11:57:13.309956 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:13Z is after 2025-08-24T17:21:41Z"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.313316 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.313347 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.313357 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.313373 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.313382 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:13Z","lastTransitionTime":"2026-01-30T11:57:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:13 crc kubenswrapper[4703]: E0130 11:57:13.325659 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:13Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.339191 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.339250 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.339263 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.339281 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.339292 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:13Z","lastTransitionTime":"2026-01-30T11:57:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:13 crc kubenswrapper[4703]: E0130 11:57:13.352598 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:13Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.355764 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.355830 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.355845 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.355886 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.355900 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:13Z","lastTransitionTime":"2026-01-30T11:57:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:13 crc kubenswrapper[4703]: E0130 11:57:13.369014 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:13Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.372414 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.372464 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.372475 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.372519 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.372530 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:13Z","lastTransitionTime":"2026-01-30T11:57:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:13 crc kubenswrapper[4703]: E0130 11:57:13.383185 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:13Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:13 crc kubenswrapper[4703]: E0130 11:57:13.383326 4703 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.384737 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.384784 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.384804 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.384827 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.384840 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:13Z","lastTransitionTime":"2026-01-30T11:57:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.486789 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.486812 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.486821 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.486835 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.486844 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:13Z","lastTransitionTime":"2026-01-30T11:57:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.589874 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.589918 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.589935 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.589951 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.589960 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:13Z","lastTransitionTime":"2026-01-30T11:57:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.692717 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.692756 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.692767 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.692783 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.692794 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:13Z","lastTransitionTime":"2026-01-30T11:57:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.796253 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.796287 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.796296 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.796311 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.796320 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:13Z","lastTransitionTime":"2026-01-30T11:57:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.898660 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.898699 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.898708 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.898725 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:13 crc kubenswrapper[4703]: I0130 11:57:13.898735 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:13Z","lastTransitionTime":"2026-01-30T11:57:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.000988 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.001021 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.001031 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.001045 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.001055 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:14Z","lastTransitionTime":"2026-01-30T11:57:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.086061 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.086133 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.086083 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:14 crc kubenswrapper[4703]: E0130 11:57:14.086238 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:57:14 crc kubenswrapper[4703]: E0130 11:57:14.086338 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:57:14 crc kubenswrapper[4703]: E0130 11:57:14.086445 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.103282 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.103323 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.103334 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.103350 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.103361 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:14Z","lastTransitionTime":"2026-01-30T11:57:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.205772 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.205814 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.205823 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.205841 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.205851 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:14Z","lastTransitionTime":"2026-01-30T11:57:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.221006 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 01:28:28.715304631 +0000 UTC Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.308466 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.308504 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.308530 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.308548 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.308562 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:14Z","lastTransitionTime":"2026-01-30T11:57:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.411170 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.411235 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.411249 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.411266 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.411277 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:14Z","lastTransitionTime":"2026-01-30T11:57:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.514271 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.514313 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.514327 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.514346 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.514358 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:14Z","lastTransitionTime":"2026-01-30T11:57:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.617353 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.617400 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.617412 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.617428 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.617439 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:14Z","lastTransitionTime":"2026-01-30T11:57:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.720007 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.720481 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.720607 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.720721 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.720805 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:14Z","lastTransitionTime":"2026-01-30T11:57:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.823707 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.823750 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.823764 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.823785 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.823801 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:14Z","lastTransitionTime":"2026-01-30T11:57:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.926325 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.926361 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.926370 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.926384 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:14 crc kubenswrapper[4703]: I0130 11:57:14.926394 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:14Z","lastTransitionTime":"2026-01-30T11:57:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.028810 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.028844 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.028852 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.028867 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.028876 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:15Z","lastTransitionTime":"2026-01-30T11:57:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
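The NotReady heartbeat repeating above reduces to a single condition: /etc/kubernetes/cni/net.d/ contains no CNI configuration. That file is expected to be written by the network plugin (OVN-Kubernetes here; its ovnkube-controller container is crash-looping further down in this log), so until the plugin runs, the kubelet keeps reporting NetworkReady=false. A minimal Go sketch of the check the kubelet message implies, assuming it is run on the node itself; the directory path is taken verbatim from the log lines above:

    package main

    import (
        "fmt"
        "log"
        "os"
    )

    func main() {
        // Path copied from the kubelet message; nothing else is assumed.
        const dir = "/etc/kubernetes/cni/net.d/"
        entries, err := os.ReadDir(dir)
        if err != nil {
            log.Fatalf("cannot read %s: %v", dir, err)
        }
        if len(entries) == 0 {
            // This is exactly the state the kubelet is complaining about.
            fmt.Println("no CNI configuration present yet")
            return
        }
        for _, e := range entries {
            fmt.Println(e.Name())
        }
    }

Once the network plugin stays up long enough to write its configuration there, the kubelet should flip NetworkReady and this heartbeat stops.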
Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.085942 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:57:15 crc kubenswrapper[4703]: E0130 11:57:15.086101 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.106817 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.112481 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13604acff234530c9f9c840854b0561cc60cf1ce
5f1fb76c1392210609442c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:09Z\\\",\\\"message\\\":\\\"try.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-l8kf2\\\\nI0130 11:57:09.853692 6396 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-cx2rm in node crc\\\\nF0130 11:57:09.853684 6396 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z]\\\\nI0130 11:57:09.853699 6396 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-cx2rm after 0 failed attempt(s)\\\\nI0130 11:57:09.853704 6396 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/mac\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:57:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:15Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.126148 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:15Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.131271 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.131300 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.131309 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.131323 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.131332 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:15Z","lastTransitionTime":"2026-01-30T11:57:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.140292 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"33ae9b16-ffdb-4338-ba98-8da799fa7591\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb1699595c372168389cb480f8c41f41a23f856d321138a04599628f1d4e19cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdf1201e129d6158cd5ad6dfb3e93f5ec2a5e75c738edd2dc3bd197e813d6ac5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39eca81eb300b1b4d7a68731db92f76c91270b0bf49f7ae9bcf9643559bcb722\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:15Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.154281 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:15Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.164541 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-30T11:57:15Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.178325 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:15Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.190646 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:15Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.206584 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:15Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.219743 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:15Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.221175 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 01:02:49.108570492 +0000 UTC Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.233082 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:15Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.234350 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.234386 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.234394 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.234408 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.234418 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:15Z","lastTransitionTime":"2026-01-30T11:57:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.247659 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:15Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.261083 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:15Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.276421 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:15Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.291419 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:15Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.308249 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:15Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.323392 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:15Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.337425 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.337482 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.337495 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.337527 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.337543 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:15Z","lastTransitionTime":"2026-01-30T11:57:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.339862 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:15Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.440791 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.440831 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.440844 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.440861 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.440873 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:15Z","lastTransitionTime":"2026-01-30T11:57:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.542906 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.543002 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.543017 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.543032 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.543041 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:15Z","lastTransitionTime":"2026-01-30T11:57:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.646973 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.647039 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.647051 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.647072 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.647086 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:15Z","lastTransitionTime":"2026-01-30T11:57:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.749984 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.750019 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.750028 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.750044 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.750054 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:15Z","lastTransitionTime":"2026-01-30T11:57:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.853316 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.853350 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.853361 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.853377 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.853388 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:15Z","lastTransitionTime":"2026-01-30T11:57:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.955007 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.955050 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.955061 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.955077 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:15 crc kubenswrapper[4703]: I0130 11:57:15.955089 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:15Z","lastTransitionTime":"2026-01-30T11:57:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.057547 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.057586 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.057594 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.057609 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.057621 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:16Z","lastTransitionTime":"2026-01-30T11:57:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.085245 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.085293 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:16 crc kubenswrapper[4703]: E0130 11:57:16.085368 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.085313 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:57:16 crc kubenswrapper[4703]: E0130 11:57:16.085447 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:57:16 crc kubenswrapper[4703]: E0130 11:57:16.085555 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.159911 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.159951 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.159961 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.159977 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.159988 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:16Z","lastTransitionTime":"2026-01-30T11:57:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
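[Editor's note] The setters.go:603 entries above embed the node's Ready condition as inline JSON, so the NotReady state can be pulled out of this log mechanically. A minimal Python sketch, assuming only the line shape shown in this excerpt; "kubelet.log" is a hypothetical path to a saved copy of the log:

    # ready_conditions.py -- parse "Node became not ready" entries.
    # Assumes the setters.go:603 line shape above; "kubelet.log" is a placeholder path.
    import json
    import re

    COND_RE = re.compile(r'"Node became not ready" node="([^"]+)" condition=(\{.*?\})')

    def ready_conditions(path="kubelet.log"):
        """Yield (node, condition-dict) for each transition logged."""
        # errors="replace": the archive wraps binary data around the text.
        with open(path, encoding="utf-8", errors="replace") as fh:
            for line in fh:
                # Entries split across archive line breaks are skipped;
                # the regex needs the whole {...} blob on one line.
                for node, blob in COND_RE.findall(line):
                    yield node, json.loads(blob)  # the condition is valid JSON as logged

    if __name__ == "__main__":
        for node, cond in ready_conditions():
            print(node, cond["lastHeartbeatTime"], cond["reason"])

Note that the condition is heartbeating roughly every 100 ms with reason KubeletNotReady, and the message points at the missing CNI config rather than at the kubelet itself.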
Has your network provider started?"} Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.222042 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 00:37:13.333196719 +0000 UTC Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.262089 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.262152 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.262166 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.262184 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.262195 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:16Z","lastTransitionTime":"2026-01-30T11:57:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.364142 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.364179 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.364191 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.364209 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.364220 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:16Z","lastTransitionTime":"2026-01-30T11:57:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.466495 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.466534 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.466545 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.466566 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.466578 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:16Z","lastTransitionTime":"2026-01-30T11:57:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.569282 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.569345 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.569364 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.569392 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.569410 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:16Z","lastTransitionTime":"2026-01-30T11:57:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.671711 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.671764 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.671778 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.671804 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.671827 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:16Z","lastTransitionTime":"2026-01-30T11:57:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.775253 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.775318 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.775345 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.775377 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.775406 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:16Z","lastTransitionTime":"2026-01-30T11:57:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.877488 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.877533 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.877544 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.877577 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.877590 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:16Z","lastTransitionTime":"2026-01-30T11:57:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.989995 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.990042 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.990053 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.990070 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:16 crc kubenswrapper[4703]: I0130 11:57:16.990079 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:16Z","lastTransitionTime":"2026-01-30T11:57:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.085451 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:17 crc kubenswrapper[4703]: E0130 11:57:17.085572 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.091523 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.091574 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.091588 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.091608 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.091619 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:17Z","lastTransitionTime":"2026-01-30T11:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.194442 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.194500 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.194514 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.194540 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.194553 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:17Z","lastTransitionTime":"2026-01-30T11:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.222700 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 20:03:29.12125369 +0000 UTC Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.299573 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.299633 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.299646 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.299670 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.299684 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:17Z","lastTransitionTime":"2026-01-30T11:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.402421 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.402482 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.402495 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.402519 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.402531 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:17Z","lastTransitionTime":"2026-01-30T11:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
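[Editor's note] The certificate_manager.go:356 lines in this stretch are worth reading separately: the expiration stays fixed at 2026-02-24 05:53:03 while the rotation deadline is re-drawn on every attempt (2025-12-06 one second earlier, 2025-12-26 here, and different dates again on the attempts that follow), which is consistent with the manager picking a fresh jittered deadline each time it evaluates rotation. A sketch that extracts both timestamps, trimming Go's nanosecond precision to what Python's datetime accepts; the path is again hypothetical:

    # cert_deadlines.py -- pull expiry / rotation-deadline pairs from the
    # certificate_manager.go entries above. Timestamp layout copied from the log.
    import re
    from datetime import datetime

    CERT_RE = re.compile(
        r"Certificate expiration is ([0-9 :.+-]+) UTC, "
        r"rotation deadline is ([0-9 :.+-]+) UTC"
    )

    def parse_go_time(raw):
        # Go logs nanoseconds; strptime's %f takes at most microseconds.
        date, clock, offset = raw.split()
        whole, _, frac = clock.partition(".")
        clock = f"{whole}.{(frac or '0')[:6]:0<6}"
        return datetime.strptime(f"{date} {clock} {offset}", "%Y-%m-%d %H:%M:%S.%f %z")

    def deadlines(path="kubelet.log"):  # hypothetical path
        with open(path, encoding="utf-8", errors="replace") as fh:
            for line in fh:
                m = CERT_RE.search(line)
                if m:
                    yield parse_go_time(m.group(1)), parse_go_time(m.group(2))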
Has your network provider started?"} Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.504887 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.504935 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.504952 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.504972 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.504985 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:17Z","lastTransitionTime":"2026-01-30T11:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.607450 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.607508 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.607521 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.607541 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.607554 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:17Z","lastTransitionTime":"2026-01-30T11:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.710608 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.710635 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.710643 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.710659 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.710668 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:17Z","lastTransitionTime":"2026-01-30T11:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.813095 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.813149 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.813160 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.813178 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.813189 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:17Z","lastTransitionTime":"2026-01-30T11:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.915909 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.915942 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.915959 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.915983 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:17 crc kubenswrapper[4703]: I0130 11:57:17.915995 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:17Z","lastTransitionTime":"2026-01-30T11:57:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.018589 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.018633 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.018645 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.018661 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.018672 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:18Z","lastTransitionTime":"2026-01-30T11:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.086274 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.086337 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.086287 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:57:18 crc kubenswrapper[4703]: E0130 11:57:18.086612 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:57:18 crc kubenswrapper[4703]: E0130 11:57:18.086701 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:57:18 crc kubenswrapper[4703]: E0130 11:57:18.086812 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.121064 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.121101 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.121109 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.121143 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.121154 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:18Z","lastTransitionTime":"2026-01-30T11:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
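[Editor's note] Three pods cycle through the same pair of messages above: util.go:30 reports that no sandbox exists, then pod_workers.go:1301 skips the sync because the network is not ready. Grouping the skips per podUID makes the blast radius explicit; a sketch under the same line-format assumption:

    # blocked_pods.py -- count "Error syncing pod, skipping" entries per pod.
    import re
    from collections import Counter

    SYNC_RE = re.compile(r'"Error syncing pod, skipping".*?pod="([^"]+)" podUID="([^"]+)"')

    def blocked_pods(path="kubelet.log"):  # hypothetical path
        hits = Counter()
        with open(path, encoding="utf-8", errors="replace") as fh:
            for line in fh:
                for pod, uid in SYNC_RE.findall(line):
                    hits[pod, uid] += 1
        return hits

    if __name__ == "__main__":
        for (pod, uid), n in blocked_pods().most_common():
            print(f"{n:4d}  {pod}  podUID={uid}")

In this excerpt that yields exactly the three pods seen above (network-check-target-xd92c, network-check-source-55646444c4-trplf, network-metrics-daemon-qrt92), each retried on a one-to-two-second cadence.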
Has your network provider started?"} Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.222780 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 12:53:26.571327405 +0000 UTC Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.224882 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.224911 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.224918 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.224933 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.224941 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:18Z","lastTransitionTime":"2026-01-30T11:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.326607 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.326682 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.326696 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.326715 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.326757 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:18Z","lastTransitionTime":"2026-01-30T11:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.429965 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.430017 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.430030 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.430049 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.430061 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:18Z","lastTransitionTime":"2026-01-30T11:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.532106 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.532158 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.532171 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.532206 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.532219 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:18Z","lastTransitionTime":"2026-01-30T11:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.635271 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.635324 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.635336 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.635355 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.635370 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:18Z","lastTransitionTime":"2026-01-30T11:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.739169 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.739234 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.739257 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.739281 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.739298 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:18Z","lastTransitionTime":"2026-01-30T11:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.843413 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.843465 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.843476 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.843496 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.843508 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:18Z","lastTransitionTime":"2026-01-30T11:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.946530 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.946577 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.946586 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.946603 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:18 crc kubenswrapper[4703]: I0130 11:57:18.946634 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:18Z","lastTransitionTime":"2026-01-30T11:57:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.050259 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.050339 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.050365 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.050398 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.050424 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:19Z","lastTransitionTime":"2026-01-30T11:57:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.085611 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:19 crc kubenswrapper[4703]: E0130 11:57:19.085817 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.153312 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.153361 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.153382 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.153410 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.153423 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:19Z","lastTransitionTime":"2026-01-30T11:57:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.223717 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 05:25:24.409012716 +0000 UTC Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.256196 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.256240 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.256252 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.256272 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.256284 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:19Z","lastTransitionTime":"2026-01-30T11:57:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.359515 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.359556 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.359575 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.359656 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.359679 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:19Z","lastTransitionTime":"2026-01-30T11:57:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.461821 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.461856 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.461865 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.461879 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.461889 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:19Z","lastTransitionTime":"2026-01-30T11:57:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.564676 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.564761 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.564771 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.564787 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.564796 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:19Z","lastTransitionTime":"2026-01-30T11:57:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.667497 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.667558 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.667572 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.667592 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.667627 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:19Z","lastTransitionTime":"2026-01-30T11:57:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.770876 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.770954 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.770972 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.770997 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.771017 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:19Z","lastTransitionTime":"2026-01-30T11:57:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.874976 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.875013 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.875025 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.875072 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.875083 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:19Z","lastTransitionTime":"2026-01-30T11:57:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.977484 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.977530 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.977542 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.977559 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:19 crc kubenswrapper[4703]: I0130 11:57:19.977571 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:19Z","lastTransitionTime":"2026-01-30T11:57:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.049437 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs\") pod \"network-metrics-daemon-qrt92\" (UID: \"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\") " pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:57:20 crc kubenswrapper[4703]: E0130 11:57:20.049571 4703 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 30 11:57:20 crc kubenswrapper[4703]: E0130 11:57:20.049619 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs podName:ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd nodeName:}" failed. No retries permitted until 2026-01-30 11:57:52.049605564 +0000 UTC m=+107.827427218 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs") pod "network-metrics-daemon-qrt92" (UID: "ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.079534 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.079562 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.079569 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.079584 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.079593 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:20Z","lastTransitionTime":"2026-01-30T11:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.085392 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:57:20 crc kubenswrapper[4703]: E0130 11:57:20.085479 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.085602 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:57:20 crc kubenswrapper[4703]: E0130 11:57:20.085663 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.085906 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:57:20 crc kubenswrapper[4703]: E0130 11:57:20.085959 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.182193 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.182239 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.182258 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.182279 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.182295 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:20Z","lastTransitionTime":"2026-01-30T11:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
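[Editor's note] The nestedpendingoperations.go:348 entry above is the one distinct failure in this stretch: the metrics-certs mount for network-metrics-daemon-qrt92 is parked for 32 s (until 11:57:52) because the metrics-daemon-secret object is not yet registered with the kubelet. The m=+107.8 suffix is Go's monotonic-clock reading, i.e. the kubelet had been up about 108 s; the 32 s figure fits the volume manager's exponential backoff doubling up from a sub-second initial delay, so this mount had already failed several times. A sketch that lifts the retry schedule out of such lines, same assumptions as before:

    # retry_backoff.py -- extract volume-mount retry deadlines from
    # nestedpendingoperations.go entries like the one above.
    import re

    RETRY_RE = re.compile(
        r"No retries permitted until ([0-9 :.+-]+) UTC "
        r"m=\+([0-9.]+) \(durationBeforeRetry ([0-9a-z]+)\)"
    )

    def retry_schedule(path="kubelet.log"):  # hypothetical path
        with open(path, encoding="utf-8", errors="replace") as fh:
            for line in fh:
                for until, mono, delay in RETRY_RE.findall(line):
                    # mono is seconds since kubelet start (Go monotonic clock)
                    yield until, float(mono), delay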
Has your network provider started?"} Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.224646 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-07 12:13:09.566814411 +0000 UTC Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.284502 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.284538 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.284555 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.284572 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.284581 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:20Z","lastTransitionTime":"2026-01-30T11:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.387093 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.387150 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.387159 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.387369 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.387383 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:20Z","lastTransitionTime":"2026-01-30T11:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.489932 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.489973 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.489983 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.490017 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.490027 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:20Z","lastTransitionTime":"2026-01-30T11:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.592628 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.592664 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.592672 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.592689 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.592698 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:20Z","lastTransitionTime":"2026-01-30T11:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.695153 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.695203 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.695215 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.695235 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.695250 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:20Z","lastTransitionTime":"2026-01-30T11:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.798028 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.798073 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.798085 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.798104 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.798116 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:20Z","lastTransitionTime":"2026-01-30T11:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.904704 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.904757 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.904773 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.904793 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:20 crc kubenswrapper[4703]: I0130 11:57:20.904807 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:20Z","lastTransitionTime":"2026-01-30T11:57:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.006550 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.006634 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.006656 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.006703 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.006727 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:21Z","lastTransitionTime":"2026-01-30T11:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.085471 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:21 crc kubenswrapper[4703]: E0130 11:57:21.085707 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.109767 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.109825 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.109843 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.109867 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.109884 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:21Z","lastTransitionTime":"2026-01-30T11:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.212832 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.212871 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.212890 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.212908 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.212919 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:21Z","lastTransitionTime":"2026-01-30T11:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.225476 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 06:43:00.526618284 +0000 UTC Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.315391 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.315435 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.315450 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.315466 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.315477 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:21Z","lastTransitionTime":"2026-01-30T11:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.418275 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.418327 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.418336 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.418352 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.418362 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:21Z","lastTransitionTime":"2026-01-30T11:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.521550 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.521621 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.521641 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.521666 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.521686 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:21Z","lastTransitionTime":"2026-01-30T11:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.624856 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.624896 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.624907 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.624925 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.624940 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:21Z","lastTransitionTime":"2026-01-30T11:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.728287 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.728361 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.728379 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.728813 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.728868 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:21Z","lastTransitionTime":"2026-01-30T11:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.832903 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.832975 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.832991 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.833009 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.833021 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:21Z","lastTransitionTime":"2026-01-30T11:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.935961 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.936005 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.936016 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.936032 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:21 crc kubenswrapper[4703]: I0130 11:57:21.936044 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:21Z","lastTransitionTime":"2026-01-30T11:57:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.038822 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.038883 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.038901 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.038929 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.038947 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:22Z","lastTransitionTime":"2026-01-30T11:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.085823 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.085838 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.086279 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:22 crc kubenswrapper[4703]: E0130 11:57:22.086379 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:57:22 crc kubenswrapper[4703]: E0130 11:57:22.086535 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.086602 4703 scope.go:117] "RemoveContainer" containerID="13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53" Jan 30 11:57:22 crc kubenswrapper[4703]: E0130 11:57:22.086681 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:22 crc kubenswrapper[4703]: E0130 11:57:22.088283 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.142282 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.142363 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.142388 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.142416 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.142432 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:22Z","lastTransitionTime":"2026-01-30T11:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.226442 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 11:03:41.602475746 +0000 UTC Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.245371 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.245439 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.245453 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.245478 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.245495 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:22Z","lastTransitionTime":"2026-01-30T11:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.348167 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.348249 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.348263 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.348282 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.348296 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:22Z","lastTransitionTime":"2026-01-30T11:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.450686 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.450770 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.450796 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.450829 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.450855 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:22Z","lastTransitionTime":"2026-01-30T11:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.554273 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.554315 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.554327 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.554345 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.554356 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:22Z","lastTransitionTime":"2026-01-30T11:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.657079 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.657113 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.657151 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.657169 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.657179 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:22Z","lastTransitionTime":"2026-01-30T11:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.760204 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.760283 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.760303 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.760328 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.760347 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:22Z","lastTransitionTime":"2026-01-30T11:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.864001 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.864055 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.864067 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.864087 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.864100 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:22Z","lastTransitionTime":"2026-01-30T11:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.967345 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.967470 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.967543 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.967583 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:22 crc kubenswrapper[4703]: I0130 11:57:22.967604 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:22Z","lastTransitionTime":"2026-01-30T11:57:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.070171 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.070260 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.070281 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.070325 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.070344 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:23Z","lastTransitionTime":"2026-01-30T11:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.085708 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:23 crc kubenswrapper[4703]: E0130 11:57:23.085917 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.173202 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.173257 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.173266 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.173282 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.173292 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:23Z","lastTransitionTime":"2026-01-30T11:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.226981 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 19:14:34.489746509 +0000 UTC Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.276195 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.276247 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.276259 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.276279 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.276292 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:23Z","lastTransitionTime":"2026-01-30T11:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.379622 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.379666 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.379677 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.379694 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.379706 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:23Z","lastTransitionTime":"2026-01-30T11:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.406884 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.406914 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.406921 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.406936 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.406945 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:23Z","lastTransitionTime":"2026-01-30T11:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:23 crc kubenswrapper[4703]: E0130 11:57:23.422024 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:23Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.425906 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.425953 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.425963 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.425980 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.425992 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:23Z","lastTransitionTime":"2026-01-30T11:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:23 crc kubenswrapper[4703]: E0130 11:57:23.439062 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:23Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.442764 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.442882 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.442907 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.442927 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.442947 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:23Z","lastTransitionTime":"2026-01-30T11:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:23 crc kubenswrapper[4703]: E0130 11:57:23.455088 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:23Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.458496 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.458539 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
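Annotation: the repeated "Error updating node status, will retry" failures in this stretch of the log all carry one root cause: the serving certificate of the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 expired on 2025-08-24, while the node clock reads 2026-01-30. A minimal Go sketch for confirming that from the node itself; the address comes from the error text above, and verification is deliberately skipped so the handshake survives long enough for the expired certificate to be read:

package main

import (
	"crypto/tls"
	"fmt"
	"log"
)

func main() {
	// Address taken from the webhook error recorded in the kubelet log.
	// InsecureSkipVerify is intentional here: an expired certificate
	// would otherwise abort the handshake before it can be inspected.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	for _, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Printf("subject=%v notBefore=%v notAfter=%v\n",
			cert.Subject, cert.NotBefore, cert.NotAfter)
	}
}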
event="NodeHasNoDiskPressure" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.458553 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.458571 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.458583 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:23Z","lastTransitionTime":"2026-01-30T11:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:23 crc kubenswrapper[4703]: E0130 11:57:23.471428 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:23Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.474718 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.474749 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.474757 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.474771 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.474779 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:23Z","lastTransitionTime":"2026-01-30T11:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:23 crc kubenswrapper[4703]: E0130 11:57:23.488083 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:23Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:23 crc kubenswrapper[4703]: E0130 11:57:23.488284 4703 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.489869 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
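Annotation: the "exceeds retry count" entry above marks the point where the kubelet abandons this status sync. Each sync attempts the PATCH only a handful of times (the upstream kubelet uses a small constant, nodeStatusUpdateRetry, historically 5) before giving up until the next sync period. A sketch of the shape of that loop, not the kubelet's actual code:

package main

import (
	"errors"
	"fmt"
)

// nodeStatusUpdateRetry stands in for the kubelet's small per-sync
// retry budget (5 in upstream sources at the time of writing).
const nodeStatusUpdateRetry = 5

// tryUpdateNodeStatus stands in for one PATCH attempt; here it always
// fails the way the log above does (webhook certificate expired).
func tryUpdateNodeStatus(attempt int) error {
	return errors.New("failed calling webhook: certificate has expired")
}

func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := tryUpdateNodeStatus(i); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	if err := updateNodeStatus(); err != nil {
		fmt.Println("Unable to update node status:", err)
	}
}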
event="NodeHasSufficientMemory" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.489902 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.489910 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.489924 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.489932 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:23Z","lastTransitionTime":"2026-01-30T11:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.592574 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.592700 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.592727 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.592759 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.592782 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:23Z","lastTransitionTime":"2026-01-30T11:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.694843 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.694891 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.694906 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.694927 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.694942 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:23Z","lastTransitionTime":"2026-01-30T11:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.796931 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.796968 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.796976 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.796991 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.797000 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:23Z","lastTransitionTime":"2026-01-30T11:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.899986 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.900036 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.900050 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.900068 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:23 crc kubenswrapper[4703]: I0130 11:57:23.900080 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:23Z","lastTransitionTime":"2026-01-30T11:57:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.002951 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.003010 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.003027 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.003048 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.003059 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:24Z","lastTransitionTime":"2026-01-30T11:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
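Annotation: the condition payload that setters.go logs is plain JSON in the shape of the Kubernetes NodeCondition type. A small self-contained sketch that parses one of the conditions recorded above; the struct declares only the fields used here, while the real type lives in k8s.io/api/core/v1:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// nodeCondition mirrors just the fields present in the log entries.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Condition text as logged by setters.go above (message shortened).
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:24Z","lastTransitionTime":"2026-01-30T11:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready"}`
	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s=%s (%s): %s\n", c.Type, c.Status, c.Reason, c.Message)
}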
Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.086229 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.086276 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.086390 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:57:24 crc kubenswrapper[4703]: E0130 11:57:24.086499 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:57:24 crc kubenswrapper[4703]: E0130 11:57:24.086577 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:57:24 crc kubenswrapper[4703]: E0130 11:57:24.086705 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.105005 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.105043 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.105053 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.105069 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.105079 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:24Z","lastTransitionTime":"2026-01-30T11:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
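Annotation: the three pods skipped above (network-check-target, network-check-source, network-metrics-daemon) all need a pod-network sandbox, and the kubelet declines to sync such pods while the runtime network is not ready; host-network pods are exempt, which is why static control-plane pods keep running with CNI down. A sketch of that gate under those assumptions (the etcd pod name below is illustrative):

package main

import "fmt"

// podSpec keeps only the field relevant to the gate; the real field is
// Pod.Spec.HostNetwork in k8s.io/api/core/v1.
type podSpec struct {
	Name        string
	HostNetwork bool
}

// shouldSkipSync mirrors the check behind "network is not ready" above:
// when the runtime network is not ready, only host-network pods proceed.
func shouldSkipSync(networkReady bool, p podSpec) bool {
	return !networkReady && !p.HostNetwork
}

func main() {
	pods := []podSpec{
		{Name: "openshift-multus/network-metrics-daemon-qrt92", HostNetwork: false},
		{Name: "openshift-etcd/etcd-crc", HostNetwork: true}, // illustrative host-network pod
	}
	for _, p := range pods {
		if shouldSkipSync(false, p) {
			fmt.Println("Error syncing pod, skipping:", p.Name)
		} else {
			fmt.Println("syncing:", p.Name)
		}
	}
}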
Has your network provider started?"} Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.207791 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.207825 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.207834 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.207847 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.207855 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:24Z","lastTransitionTime":"2026-01-30T11:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.227156 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 05:52:34.085355753 +0000 UTC Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.310882 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.310944 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.310964 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.310991 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.311012 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:24Z","lastTransitionTime":"2026-01-30T11:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.413835 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.413877 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.413889 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.413909 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.413921 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:24Z","lastTransitionTime":"2026-01-30T11:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.517012 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.517286 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.517302 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.517325 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.517343 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:24Z","lastTransitionTime":"2026-01-30T11:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.621278 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.621348 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.621369 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.621398 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.621421 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:24Z","lastTransitionTime":"2026-01-30T11:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.724053 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.724106 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.724143 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.724163 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.724176 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:24Z","lastTransitionTime":"2026-01-30T11:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.826637 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.826678 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.826688 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.826704 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.826714 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:24Z","lastTransitionTime":"2026-01-30T11:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.930343 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.930396 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.930408 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.930426 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:24 crc kubenswrapper[4703]: I0130 11:57:24.930465 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:24Z","lastTransitionTime":"2026-01-30T11:57:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.033540 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.033642 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.033653 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.033671 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.033684 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:25Z","lastTransitionTime":"2026-01-30T11:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.086725 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:25 crc kubenswrapper[4703]: E0130 11:57:25.086879 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.106595 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static
-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:25Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.122950 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:25Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.136288 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:25Z is after 2025-08-24T17:21:41Z"
Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.136348 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.136397 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.136413 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.136438 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.136455 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:25Z","lastTransitionTime":"2026-01-30T11:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.151105 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:25Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.171005 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:25Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.185265 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:25Z is after 2025-08-24T17:21:41Z" Jan 30 
11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.205426 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd0
7f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:09Z\\\",\\\"message\\\":\\\"try.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-l8kf2\\\\nI0130 11:57:09.853692 6396 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-cx2rm in node crc\\\\nF0130 11:57:09.853684 6396 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z]\\\\nI0130 11:57:09.853699 6396 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-cx2rm after 0 failed attempt(s)\\\\nI0130 11:57:09.853704 6396 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/mac\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:57:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:25Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.219537 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:25Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.228273 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 11:47:52.154149446 +0000 UTC Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.232758 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33ae9b16-ffdb-4338-ba98-8da799fa7591\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb1699595c372168389cb480f8c41f41a23f856d321138a04599628f1d4e19cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdf1201e129d6158cd5ad6dfb3e93f5ec2a5e75c738edd2dc3bd197e813d6ac5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39eca81eb300b1b4d7a68731db92f76c91270b0bf49f7ae9bcf9643559bcb722\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:25Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.241677 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.241733 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.241743 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.241761 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.242151 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:25Z","lastTransitionTime":"2026-01-30T11:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.249522 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:25Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.262153 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:25Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.275298 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:25Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.284770 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:25Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.294203 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4c34a2e-afef-43fa-af2f-24fba2afd001\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4cdadd36f3e4479c6957f86bd28947800e8c499b7a0990f3e303e7970b2d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://321c3cb914c8dc421b62be78bc5ad4fac126d2a68c2b17510a0fcf11fc84e0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://321c3cb914c8dc421b62be78bc5ad4fac126d2a68c2b17510a0fcf11fc84e0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:25Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.307620 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:25Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.323820 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:25Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.339733 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:25Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.344310 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.344366 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.344382 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.344409 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.344426 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:25Z","lastTransitionTime":"2026-01-30T11:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.354405 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:25Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.447221 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.447269 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.447286 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.447306 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.447319 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:25Z","lastTransitionTime":"2026-01-30T11:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.550790 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.550827 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.550836 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.550852 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.550861 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:25Z","lastTransitionTime":"2026-01-30T11:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.653174 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.653547 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.653750 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.653950 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.654214 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:25Z","lastTransitionTime":"2026-01-30T11:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.756559 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.756614 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.756627 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.756648 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.756660 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:25Z","lastTransitionTime":"2026-01-30T11:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.859209 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.859654 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.859865 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.860116 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.860315 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:25Z","lastTransitionTime":"2026-01-30T11:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.963140 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.963183 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.963193 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.963213 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:25 crc kubenswrapper[4703]: I0130 11:57:25.963227 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:25Z","lastTransitionTime":"2026-01-30T11:57:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.020648 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-72zlj_874a5df5-f6bd-4111-aefa-f43e43e1fcc0/kube-multus/0.log" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.020700 4703 generic.go:334] "Generic (PLEG): container finished" podID="874a5df5-f6bd-4111-aefa-f43e43e1fcc0" containerID="f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890" exitCode=1 Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.020739 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-72zlj" event={"ID":"874a5df5-f6bd-4111-aefa-f43e43e1fcc0","Type":"ContainerDied","Data":"f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890"} Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.021072 4703 scope.go:117] "RemoveContainer" containerID="f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.037208 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.052284 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.065341 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.065987 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.066029 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.066041 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.066061 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.066072 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:26Z","lastTransitionTime":"2026-01-30T11:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.081664 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.085503 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.085521 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.085509 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:26 crc kubenswrapper[4703]: E0130 11:57:26.085664 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:57:26 crc kubenswrapper[4703]: E0130 11:57:26.085733 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:26 crc kubenswrapper[4703]: E0130 11:57:26.085933 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.097369 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\
\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.109747 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\
\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.131549 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13604acff234530c9f9c840854b0561cc60cf1ce
5f1fb76c1392210609442c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:09Z\\\",\\\"message\\\":\\\"try.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-l8kf2\\\\nI0130 11:57:09.853692 6396 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-cx2rm in node crc\\\\nF0130 11:57:09.853684 6396 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z]\\\\nI0130 11:57:09.853699 6396 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-cx2rm after 0 failed attempt(s)\\\\nI0130 11:57:09.853704 6396 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/mac\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:57:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.147007 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.158409 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33ae9b16-ffdb-4338-ba98-8da799fa7591\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb1699595c372168389cb480f8c41f41a23f856d321138a04599628f1d4e19cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdf1201e129d6158cd5ad6dfb3e93f5ec2a5e75c738edd2dc3bd197e813d6ac5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39eca81eb300b1b4d7a68731db92f76c91270b0bf49f7ae9bcf9643559bcb722\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.168653 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.168688 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.168695 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.168729 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.168741 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:26Z","lastTransitionTime":"2026-01-30T11:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.179193 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.191547 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.206114 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:26Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:26Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:25Z\\\",\\\"message\\\":\\\"2026-01-30T11:56:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_75c06f9f-7039-423a-ae70-a5ed3bff69c2\\\\n2026-01-30T11:56:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_75c06f9f-7039-423a-ae70-a5ed3bff69c2 to /host/opt/cni/bin/\\\\n2026-01-30T11:56:40Z [verbose] multus-daemon started\\\\n2026-01-30T11:56:40Z [verbose] Readiness Indicator file check\\\\n2026-01-30T11:57:25Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the 
condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.218318 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.228461 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 01:57:18.488619502 +0000 UTC Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.231036 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4c34a2e-afef-43fa-af2f-24fba2afd001\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4cdadd36f3e4479c6957f86bd28947800e8c499b7a0990f3e303e7970b2d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://321c3cb914c8dc421b62be78bc5ad4fac126d2a68c2b17510a0fcf11fc84e0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://321c3cb914c8dc421b62be78bc5ad4fac126d2a68c2b17510a0fcf11fc84e0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.251307 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.265314 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.271232 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.271355 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.271373 4703 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.271474 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.271489 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:26Z","lastTransitionTime":"2026-01-30T11:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.281533 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.293287 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:26Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.373600 4703 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.373673 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.373691 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.373719 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.373737 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:26Z","lastTransitionTime":"2026-01-30T11:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.476218 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.476252 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.476261 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.476274 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.476283 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:26Z","lastTransitionTime":"2026-01-30T11:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.578569 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.578606 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.578616 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.578630 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.578641 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:26Z","lastTransitionTime":"2026-01-30T11:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.680959 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.681173 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.681219 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.681252 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.681321 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:26Z","lastTransitionTime":"2026-01-30T11:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.784007 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.784046 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.784058 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.784075 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.784087 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:26Z","lastTransitionTime":"2026-01-30T11:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.887253 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.887316 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.887339 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.887366 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.887388 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:26Z","lastTransitionTime":"2026-01-30T11:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.990300 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.990348 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.990362 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.990383 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:26 crc kubenswrapper[4703]: I0130 11:57:26.990394 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:26Z","lastTransitionTime":"2026-01-30T11:57:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.025358 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-72zlj_874a5df5-f6bd-4111-aefa-f43e43e1fcc0/kube-multus/0.log" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.025650 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-72zlj" event={"ID":"874a5df5-f6bd-4111-aefa-f43e43e1fcc0","Type":"ContainerStarted","Data":"af38bdb6fc351ea256fbb5a368b87ad70202821a6a348e230d540c60694cc014"} Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.052184 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13604acff234530c9f9c840854b0561cc60cf1ce
5f1fb76c1392210609442c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:09Z\\\",\\\"message\\\":\\\"try.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-l8kf2\\\\nI0130 11:57:09.853692 6396 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-cx2rm in node crc\\\\nF0130 11:57:09.853684 6396 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z]\\\\nI0130 11:57:09.853699 6396 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-cx2rm after 0 failed attempt(s)\\\\nI0130 11:57:09.853704 6396 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/mac\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:57:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:27Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.064409 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:27Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.076557 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33ae9b16-ffdb-4338-ba98-8da799fa7591\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb1699595c372168389cb480f8c41f41a23f856d321138a04599628f1d4e19cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdf1201e129d6158cd5ad6dfb3e93f5ec2a5e75c738edd2dc3bd197e813d6ac5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39eca81eb300b1b4d7a68731db92f76c91270b0bf49f7ae9bcf9643559bcb722\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:27Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.086341 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:27 crc kubenswrapper[4703]: E0130 11:57:27.086463 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.089611 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:27Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.092411 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.092443 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.092454 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.092475 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.092500 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:27Z","lastTransitionTime":"2026-01-30T11:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.101457 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:27Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.116548 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af38bdb6fc351ea256fbb5a368b87ad70202821a6a348e230d540c60694cc014\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:25Z\\\",\\\"message\\\":\\\"2026-01-30T11:56:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_75c06f9f-7039-423a-ae70-a5ed3bff69c2\\\\n2026-01-30T11:56:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_75c06f9f-7039-423a-ae70-a5ed3bff69c2 to /host/opt/cni/bin/\\\\n2026-01-30T11:56:40Z [verbose] multus-daemon started\\\\n2026-01-30T11:56:40Z [verbose] Readiness Indicator file check\\\\n2026-01-30T11:57:25Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:57:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:27Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.129198 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:27Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.141730 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4c34a2e-afef-43fa-af2f-24fba2afd001\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4cdadd36f3e4479c6957f86bd28947800e8c499b7a0990f3e303e7970b2d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://321c3cb914c8dc421b62be78bc5ad4fac126d2a68c2b17510a0fcf11fc84e0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://321c3cb914c8dc421b62be78bc5ad4fac126d2a68c2b17510a0fcf11fc84e0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:27Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.163226 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:27Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.180446 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:27Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.195228 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.195274 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.195286 4703 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.195305 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.195329 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:27Z","lastTransitionTime":"2026-01-30T11:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.197438 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:27Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.214568 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:27Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.228862 4703 certificate_manager.go:356] 
kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 21:19:05.692473111 +0000 UTC Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.239463 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:27Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.252847 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:27Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.264325 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:27Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.276229 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:27Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.289763 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:27Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.297960 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.297996 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.298009 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.298029 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.298043 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:27Z","lastTransitionTime":"2026-01-30T11:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.301397 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:27Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.401057 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.401103 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.401114 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.401148 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.401159 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:27Z","lastTransitionTime":"2026-01-30T11:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.503785 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.503866 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.503884 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.503918 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.503960 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:27Z","lastTransitionTime":"2026-01-30T11:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.606235 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.606328 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.606346 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.606371 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.606387 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:27Z","lastTransitionTime":"2026-01-30T11:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.708556 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.708817 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.708903 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.708992 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.709075 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:27Z","lastTransitionTime":"2026-01-30T11:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.811818 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.812238 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.812455 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.812871 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.813079 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:27Z","lastTransitionTime":"2026-01-30T11:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.916468 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.916927 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.917198 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.917451 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:27 crc kubenswrapper[4703]: I0130 11:57:27.917676 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:27Z","lastTransitionTime":"2026-01-30T11:57:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.020077 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.020381 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.020463 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.020562 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.020645 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:28Z","lastTransitionTime":"2026-01-30T11:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.085961 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.086080 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.085961 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:57:28 crc kubenswrapper[4703]: E0130 11:57:28.086145 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:57:28 crc kubenswrapper[4703]: E0130 11:57:28.086258 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:57:28 crc kubenswrapper[4703]: E0130 11:57:28.086368 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.122948 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.122999 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.123009 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.123028 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.123040 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:28Z","lastTransitionTime":"2026-01-30T11:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.225610 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.225685 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.225699 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.225719 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.225732 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:28Z","lastTransitionTime":"2026-01-30T11:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.230053 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 02:53:15.326904201 +0000 UTC
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.328147 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.328185 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.328194 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.328212 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.328221 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:28Z","lastTransitionTime":"2026-01-30T11:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.430268 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.430629 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.430701 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.430778 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.430849 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:28Z","lastTransitionTime":"2026-01-30T11:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.533391 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.533861 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.534100 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.534349 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.534543 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:28Z","lastTransitionTime":"2026-01-30T11:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.637322 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.637382 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.637399 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.637424 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.637442 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:28Z","lastTransitionTime":"2026-01-30T11:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.740085 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.740590 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.740704 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.740814 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.740922 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:28Z","lastTransitionTime":"2026-01-30T11:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.843323 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.843403 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.843423 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.843449 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.843468 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:28Z","lastTransitionTime":"2026-01-30T11:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.946654 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.946910 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.946999 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.947091 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.947253 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:28Z","lastTransitionTime":"2026-01-30T11:57:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.947554 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.947681 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:57:28 crc kubenswrapper[4703]: E0130 11:57:28.947899 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:32.947880529 +0000 UTC m=+148.725702193 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:57:28 crc kubenswrapper[4703]: E0130 11:57:28.948045 4703 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 30 11:57:28 crc kubenswrapper[4703]: E0130 11:57:28.948207 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-30 11:58:32.948191259 +0000 UTC m=+148.726012923 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 30 11:57:28 crc kubenswrapper[4703]: I0130 11:57:28.948451 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:57:28 crc kubenswrapper[4703]: E0130 11:57:28.948651 4703 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 30 11:57:28 crc kubenswrapper[4703]: E0130 11:57:28.948801 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-30 11:58:32.948789416 +0000 UTC m=+148.726611080 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.049068 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.049112 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:57:29 crc kubenswrapper[4703]: E0130 11:57:29.049358 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 30 11:57:29 crc kubenswrapper[4703]: E0130 11:57:29.049376 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 30 11:57:29 crc kubenswrapper[4703]: E0130 11:57:29.049386 4703 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 11:57:29 crc kubenswrapper[4703]: E0130 11:57:29.049427 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-30 11:58:33.049414052 +0000 UTC m=+148.827235706 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 11:57:29 crc kubenswrapper[4703]: E0130 11:57:29.049489 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 30 11:57:29 crc kubenswrapper[4703]: E0130 11:57:29.049506 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 30 11:57:29 crc kubenswrapper[4703]: E0130 11:57:29.049514 4703 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 11:57:29 crc kubenswrapper[4703]: E0130 11:57:29.049542 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-30 11:58:33.049533665 +0000 UTC m=+148.827355329 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.050757 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.050778 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.050786 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.050798 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.050807 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:29Z","lastTransitionTime":"2026-01-30T11:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.086404 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:57:29 crc kubenswrapper[4703]: E0130 11:57:29.086578 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.154218 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.154291 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.154310 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.154337 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.154354 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:29Z","lastTransitionTime":"2026-01-30T11:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.231225 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 06:10:34.106822579 +0000 UTC
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.257300 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.257535 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.257551 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.257569 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.257643 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:29Z","lastTransitionTime":"2026-01-30T11:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.360760 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.360817 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.360838 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.360862 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.360878 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:29Z","lastTransitionTime":"2026-01-30T11:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.464027 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.464325 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.464359 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.464393 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.464417 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:29Z","lastTransitionTime":"2026-01-30T11:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.566700 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.566760 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.566776 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.566798 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.566815 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:29Z","lastTransitionTime":"2026-01-30T11:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.668560 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.668616 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.668631 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.668651 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.668667 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:29Z","lastTransitionTime":"2026-01-30T11:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.771795 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.771848 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.771866 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.771891 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.771910 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:29Z","lastTransitionTime":"2026-01-30T11:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.874780 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.874839 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.874856 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.874885 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.874904 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:29Z","lastTransitionTime":"2026-01-30T11:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.977624 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.977672 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.977681 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.977699 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:29 crc kubenswrapper[4703]: I0130 11:57:29.977710 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:29Z","lastTransitionTime":"2026-01-30T11:57:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.080586 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.080802 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.080836 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.080868 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.080892 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:30Z","lastTransitionTime":"2026-01-30T11:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.086009 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.086069 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:57:30 crc kubenswrapper[4703]: E0130 11:57:30.086175 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.086222 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:57:30 crc kubenswrapper[4703]: E0130 11:57:30.086414 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:57:30 crc kubenswrapper[4703]: E0130 11:57:30.086541 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.184339 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.184395 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.184412 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.184438 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.184456 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:30Z","lastTransitionTime":"2026-01-30T11:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.231830 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 23:34:24.190609235 +0000 UTC
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.287139 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.287178 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.287188 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.287206 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.287220 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:30Z","lastTransitionTime":"2026-01-30T11:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.390349 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.390386 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.390397 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.390416 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.390428 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:30Z","lastTransitionTime":"2026-01-30T11:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.492713 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.492977 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.493063 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.493173 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.493382 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:30Z","lastTransitionTime":"2026-01-30T11:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.595838 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.596078 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.596177 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.596276 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.596360 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:30Z","lastTransitionTime":"2026-01-30T11:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.698819 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.699133 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.699209 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.699294 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.699396 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:30Z","lastTransitionTime":"2026-01-30T11:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.801900 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.801928 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.801937 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.801951 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.801961 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:30Z","lastTransitionTime":"2026-01-30T11:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.904505 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.904575 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.904586 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.904602 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:30 crc kubenswrapper[4703]: I0130 11:57:30.904611 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:30Z","lastTransitionTime":"2026-01-30T11:57:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.006623 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.006695 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.006717 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.006747 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.006764 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:31Z","lastTransitionTime":"2026-01-30T11:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.085720 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:31 crc kubenswrapper[4703]: E0130 11:57:31.086017 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.109836 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.109916 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.109935 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.109964 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.109976 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:31Z","lastTransitionTime":"2026-01-30T11:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.211951 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.211998 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.212009 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.212025 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.212034 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:31Z","lastTransitionTime":"2026-01-30T11:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.232375 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 06:45:44.554041978 +0000 UTC
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.314982 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.315068 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.315095 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.315170 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.315192 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:31Z","lastTransitionTime":"2026-01-30T11:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.418422 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.418472 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.418488 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.418509 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.418526 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:31Z","lastTransitionTime":"2026-01-30T11:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.521491 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.521531 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.521542 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.521562 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.521574 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:31Z","lastTransitionTime":"2026-01-30T11:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.623806 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.623859 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.623877 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.623903 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.623921 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:31Z","lastTransitionTime":"2026-01-30T11:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.727335 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.727789 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.727900 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.728037 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.728152 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:31Z","lastTransitionTime":"2026-01-30T11:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.830882 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.830930 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.830942 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.830960 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.830971 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:31Z","lastTransitionTime":"2026-01-30T11:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.933939 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.934324 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.934586 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.934814 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:31 crc kubenswrapper[4703]: I0130 11:57:31.935028 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:31Z","lastTransitionTime":"2026-01-30T11:57:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.037344 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.037419 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.037434 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.037451 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.037461 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:32Z","lastTransitionTime":"2026-01-30T11:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.085875 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.085934 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:57:32 crc kubenswrapper[4703]: E0130 11:57:32.085991 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.086081 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:57:32 crc kubenswrapper[4703]: E0130 11:57:32.086194 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:57:32 crc kubenswrapper[4703]: E0130 11:57:32.086354 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.140800 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.140895 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.140908 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.140933 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.140949 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:32Z","lastTransitionTime":"2026-01-30T11:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
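Each setters.go entry embeds the new Ready condition as plain JSON. A stdlib-only Go sketch of decoding it (the struct is a hand-rolled stand-in for the upstream NodeCondition type rather than an import of k8s.io/api, and the sample string is abridged from the lines above):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// NodeCondition mirrors the fields visible in the setters.go log lines;
// it is an illustrative stand-in, not the upstream Kubernetes type.
type NodeCondition struct {
	Type               string    `json:"type"`
	Status             string    `json:"status"`
	LastHeartbeatTime  time.Time `json:"lastHeartbeatTime"`
	LastTransitionTime time.Time `json:"lastTransitionTime"`
	Reason             string    `json:"reason"`
	Message            string    `json:"message"`
}

func main() {
	// Abridged copy of a logged condition object.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:32Z","lastTransitionTime":"2026-01-30T11:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready"}`
	var c NodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s since %s: %s\n", c.Type, c.Status, c.LastTransitionTime.Format(time.RFC3339), c.Reason)
}

The RFC 3339 timestamps decode directly into time.Time, which is why the heartbeat and transition times can be compared programmatically when triaging logs like this one.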
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.232562 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 05:06:29.114751671 +0000 UTC
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.244409 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.244480 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.244498 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.244527 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.244544 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:32Z","lastTransitionTime":"2026-01-30T11:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.348114 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.348227 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.348243 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.348268 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.348284 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:32Z","lastTransitionTime":"2026-01-30T11:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
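The certificate_manager.go lines deserve a second look: the serving certificate expires 2026-02-24, yet the printed rotation deadline is different on every pass (2025-12-16 above, 2025-12-31 here, 2025-11-11 further down) and always lies in the past relative to the 2026-01-30 clock, which suggests rotation is overdue and the deadline is being re-drawn on each attempt. A sketch of that kind of jittered deadline computation, assuming a randomized window late in the certificate's lifetime (the exact window fractions and the one-year lifetime below are assumptions for illustration, not values taken from this log):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline picks a random point late in the certificate's
// validity span, here assumed to be between 70% and 90% of the
// NotBefore->NotAfter duration.
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC) // expiry from the log
	notBefore := notAfter.Add(-365 * 24 * time.Hour)          // assumed lifetime; not in the log
	for i := 0; i < 3; i++ {
		// Each recomputation lands somewhere else in the window,
		// matching the changing deadlines in the surrounding entries.
		fmt.Println("rotation deadline:", rotationDeadline(notBefore, notAfter))
	}
}

Jitter spreads rotation load across a fleet; the telltale sign here is not the jitter itself but that every drawn deadline precedes the current time, so the rotation that should have happened months earlier still has not completed.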
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.451626 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.451695 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.451705 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.451722 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.451733 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:32Z","lastTransitionTime":"2026-01-30T11:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.555692 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.555762 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.555779 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.555807 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.555832 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:32Z","lastTransitionTime":"2026-01-30T11:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.658478 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.658525 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.658541 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.658564 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.658580 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:32Z","lastTransitionTime":"2026-01-30T11:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.761692 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.761753 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.761771 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.761795 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.761813 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:32Z","lastTransitionTime":"2026-01-30T11:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.864983 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.865059 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.865076 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.865104 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.865154 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:32Z","lastTransitionTime":"2026-01-30T11:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.968606 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.969160 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.969329 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.969481 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:32 crc kubenswrapper[4703]: I0130 11:57:32.969603 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:32Z","lastTransitionTime":"2026-01-30T11:57:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.075565 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.075691 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.075713 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.075739 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.075796 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:33Z","lastTransitionTime":"2026-01-30T11:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.085987 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:57:33 crc kubenswrapper[4703]: E0130 11:57:33.086297 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.178943 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.179022 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.179042 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.179065 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.179163 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:33Z","lastTransitionTime":"2026-01-30T11:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.233259 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 12:08:12.444524331 +0000 UTC
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.282856 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.282915 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.282929 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.282947 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.282961 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:33Z","lastTransitionTime":"2026-01-30T11:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.386267 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.386322 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.386335 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.386354 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.386369 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:33Z","lastTransitionTime":"2026-01-30T11:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.489380 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.489427 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.489439 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.489458 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.489469 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:33Z","lastTransitionTime":"2026-01-30T11:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.591962 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.592013 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.592036 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.592061 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.592077 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:33Z","lastTransitionTime":"2026-01-30T11:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.695029 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.695078 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.695089 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.695107 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.695135 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:33Z","lastTransitionTime":"2026-01-30T11:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.787623 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.787688 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.787705 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.787728 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.787746 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:33Z","lastTransitionTime":"2026-01-30T11:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:33 crc kubenswrapper[4703]: E0130 11:57:33.802242 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:33Z is after 2025-08-24T17:21:41Z"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.807451 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.807502 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
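The status patch above is rejected end-to-end because the node.network-node-identity.openshift.io webhook at 127.0.0.1:9743 presents a certificate that expired on 2025-08-24, while the node clock reads 2026-01-30. A small Go diagnostic sketch that dials a TLS endpoint and reports the leaf certificate's validity window (the address comes from the error text; verification is deliberately skipped so an expired chain can still be inspected):

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	addr := "127.0.0.1:9743" // webhook address taken from the log error
	conn, err := tls.Dial("tcp", addr, &tls.Config{
		InsecureSkipVerify: true, // allow inspecting even an expired certificate
	})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	// The leaf certificate is the first entry in the peer chain.
	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Printf("leaf cert valid %s to %s\n", cert.NotBefore, cert.NotAfter)
	if time.Now().After(cert.NotAfter) {
		// The x509 failure in the log corresponds to this branch.
		fmt.Println("certificate has expired; webhook calls will fail TLS verification")
	}
}

Because the API server cannot call the webhook, every node status patch fails, and the kubelet immediately retries, which is why the same multi-kilobyte patch body repeats below with only the microsecond timestamps changing.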
event="NodeHasNoDiskPressure" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.807518 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.807539 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.807553 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:33Z","lastTransitionTime":"2026-01-30T11:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:33 crc kubenswrapper[4703]: E0130 11:57:33.825724 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:33Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.830365 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.830425 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.830437 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.830473 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.830487 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:33Z","lastTransitionTime":"2026-01-30T11:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:33 crc kubenswrapper[4703]: E0130 11:57:33.843649 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:33Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.847308 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.847361 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
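Editorial note on the recurring KubeletNotReady condition above: the kubelet reports the node not ready because no CNI configuration file exists in /etc/kubernetes/cni/net.d/. A minimal sketch of that kind of readiness probe is below; it is not the kubelet's actual code path, only an illustration assuming the directory named in the log message and the config extensions libcni conventionally accepts.

// cnicheck.go — hedged sketch of a CNI-config readiness probe; the directory
// comes from the log message, the extensions from common CNI conventions.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log message
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Printf("network not ready: cannot read %s: %v\n", confDir, err)
		os.Exit(1)
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions a CNI loader typically accepts
			fmt.Printf("found CNI config: %s\n", filepath.Join(confDir, e.Name()))
			return
		}
	}
	fmt.Println("network not ready: no CNI configuration file found; has your network provider started?")
	os.Exit(1)
}

Once the network provider (here, presumably OVN-Kubernetes/Multus on OpenShift) writes a config file into that directory, a check like this succeeds and the Ready condition can flip back to True.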
event="NodeHasNoDiskPressure" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.847379 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.847407 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.847428 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:33Z","lastTransitionTime":"2026-01-30T11:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:33 crc kubenswrapper[4703]: E0130 11:57:33.859878 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:33Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.864052 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.864095 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
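Editorial note on the repeated webhook failure above: every retry fails identically because the node.network-node-identity.openshift.io webhook presents a serving certificate whose NotAfter (2025-08-24T17:21:41Z) is in the past relative to the current time. The validity-window test behind the "certificate has expired or is not yet valid" error can be reproduced with Go's crypto/x509; the certificate path below is a placeholder, not a path from this log.

// certcheck.go — sketch of the x509 validity-window check that produces
// "certificate has expired or is not yet valid"; cert path is hypothetical.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	pemBytes, err := os.ReadFile("/path/to/webhook-serving.crt") // placeholder path
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		fmt.Fprintln(os.Stderr, "no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	now := time.Now()
	fmt.Printf("NotBefore=%s NotAfter=%s now=%s\n", cert.NotBefore, cert.NotAfter, now)
	// Outside [NotBefore, NotAfter] a verifier rejects the chain with the
	// same "expired or is not yet valid" wording seen in the log.
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		fmt.Println("certificate has expired or is not yet valid")
		os.Exit(1)
	}
	fmt.Println("certificate is within its validity window")
}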
event="NodeHasNoDiskPressure" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.864113 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.864160 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.864177 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:33Z","lastTransitionTime":"2026-01-30T11:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:33 crc kubenswrapper[4703]: E0130 11:57:33.875698 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:33Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:33 crc kubenswrapper[4703]: E0130 11:57:33.875861 4703 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.877330 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
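Editorial note on "update node status exceeds retry count" above: the kubelet attempts the status patch a fixed number of times before giving up for that sync period. A hedged sketch of that bounded-retry pattern follows; the constant mirrors kubelet's nodeStatusUpdateRetry, but tryUpdate and the surrounding structure are illustrative stand-ins, not kubelet API.

// statusretry.go — illustrative bounded-retry loop behind "Error updating node
// status, will retry" followed by "update node status exceeds retry count".
package main

import (
	"errors"
	"fmt"
)

// Assumed to match kubelet's nodeStatusUpdateRetry constant (5 attempts).
const nodeStatusUpdateRetry = 5

func updateNodeStatus(tryUpdate func() error) error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := tryUpdate(); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	// Simulate this log's failure mode: every PATCH is rejected by the
	// admission webhook because its serving certificate has expired.
	webhookErr := errors.New(`failed calling webhook "node.network-node-identity.openshift.io": x509: certificate has expired or is not yet valid`)
	if err := updateNodeStatus(func() error { return webhookErr }); err != nil {
		fmt.Println("Unable to update node status:", err)
	}
}

Because the webhook rejects every attempt for the same reason, retrying cannot succeed until the webhook's certificate is rotated; the retries only bound how long each sync loop spends failing.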
event="NodeHasSufficientMemory" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.877357 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.877368 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.877383 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.877394 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:33Z","lastTransitionTime":"2026-01-30T11:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.980064 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.980165 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.980184 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.980210 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:33 crc kubenswrapper[4703]: I0130 11:57:33.980241 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:33Z","lastTransitionTime":"2026-01-30T11:57:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.082349 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.082394 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.082411 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.082434 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.082449 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:34Z","lastTransitionTime":"2026-01-30T11:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.085855 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.085897 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.085899 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:34 crc kubenswrapper[4703]: E0130 11:57:34.086018 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:57:34 crc kubenswrapper[4703]: E0130 11:57:34.086172 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:57:34 crc kubenswrapper[4703]: E0130 11:57:34.086264 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.185313 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.185370 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.185398 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.185422 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.185436 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:34Z","lastTransitionTime":"2026-01-30T11:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.234030 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 18:50:12.644387384 +0000 UTC Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.288804 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.288863 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.288876 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.288896 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.288909 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:34Z","lastTransitionTime":"2026-01-30T11:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.391856 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.391898 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.391908 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.391925 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.391940 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:34Z","lastTransitionTime":"2026-01-30T11:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.495554 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.495626 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.495649 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.495683 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.495727 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:34Z","lastTransitionTime":"2026-01-30T11:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.599071 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.599145 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.599156 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.599176 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.599186 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:34Z","lastTransitionTime":"2026-01-30T11:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.702542 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.702610 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.702632 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.702669 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.702696 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:34Z","lastTransitionTime":"2026-01-30T11:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.805769 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.805830 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.805844 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.805866 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.805884 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:34Z","lastTransitionTime":"2026-01-30T11:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.908843 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.909023 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.909051 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.909084 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:34 crc kubenswrapper[4703]: I0130 11:57:34.909102 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:34Z","lastTransitionTime":"2026-01-30T11:57:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.011865 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.011909 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.011918 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.011932 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.011942 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:35Z","lastTransitionTime":"2026-01-30T11:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.085624 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:35 crc kubenswrapper[4703]: E0130 11:57:35.085811 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.099003 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.115887 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.115944 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.115960 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.115994 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.116036 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:35Z","lastTransitionTime":"2026-01-30T11:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.116976 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.130539 4703 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-30T11:57:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.148062 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name
\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.163329 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.176408 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.206425 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://13604acff234530c9f9c840854b0561cc60cf1ce
5f1fb76c1392210609442c53\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:09Z\\\",\\\"message\\\":\\\"try.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-l8kf2\\\\nI0130 11:57:09.853692 6396 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-cx2rm in node crc\\\\nF0130 11:57:09.853684 6396 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z]\\\\nI0130 11:57:09.853699 6396 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-cx2rm after 0 failed attempt(s)\\\\nI0130 11:57:09.853704 6396 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/mac\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:57:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.218418 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.218451 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.218464 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.218492 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.218506 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:35Z","lastTransitionTime":"2026-01-30T11:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.224157 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.233509 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.234513 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 09:30:41.528914936 +0000 UTC Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.266344 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af38bdb6fc351ea256fbb5a368b87ad70202821a6a348e230d540c60694cc014\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:25Z\\\",\\\"message\\\":\\\"2026-01-30T11:56:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_75c06f9f-7039-423a-ae70-a5ed3bff69c2\\\\n2026-01-30T11:56:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_75c06f9f-7039-423a-ae70-a5ed3bff69c2 to /host/opt/cni/bin/\\\\n2026-01-30T11:56:40Z [verbose] multus-daemon started\\\\n2026-01-30T11:56:40Z [verbose] Readiness Indicator file check\\\\n2026-01-30T11:57:25Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:57:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.291168 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.306054 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33ae9b16-ffdb-4338-ba98-8da799fa7591\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb1699595c372168389cb480f8c41f41a23f856d321138a04599628f1d4e19cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdf1201e129d6158cd5ad6dfb3e93f5ec2a5e75c738edd2dc3bd197e813d6ac5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39eca81eb300b1b4d7a68731db92f76c91270b0bf49f7ae9bcf9643559bcb722\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.317920 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.320601 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.320637 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.320648 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.320664 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.320677 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:35Z","lastTransitionTime":"2026-01-30T11:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.332975 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.343662 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.352644 4703 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4c34a2e-afef-43fa-af2f-24fba2afd001\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4cdadd36f3e4479c6957f86bd28947800e8c499b7a0990f3e303e7970b2d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://321c3cb914c8dc421b62be78bc5ad4fac126d2a68c2b17510a0fcf11fc84e0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://321c3cb914c8dc421b62be78bc5ad4fac126d2a68c2b17510a0fcf11fc84e0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.365216 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.376945 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:35Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.422714 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.422767 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.422776 4703 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.422790 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.422799 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:35Z","lastTransitionTime":"2026-01-30T11:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.525135 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.525168 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.525177 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.525193 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.525201 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:35Z","lastTransitionTime":"2026-01-30T11:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.628334 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.628737 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.628834 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.628948 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.629039 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:35Z","lastTransitionTime":"2026-01-30T11:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.731347 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.731383 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.731392 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.731407 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.731417 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:35Z","lastTransitionTime":"2026-01-30T11:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.834481 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.834823 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.835212 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.835625 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.835908 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:35Z","lastTransitionTime":"2026-01-30T11:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.938434 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.938721 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.938820 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.938908 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:35 crc kubenswrapper[4703]: I0130 11:57:35.938992 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:35Z","lastTransitionTime":"2026-01-30T11:57:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.041828 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.041865 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.041874 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.041891 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.041900 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:36Z","lastTransitionTime":"2026-01-30T11:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.085664 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.085696 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.085707 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:36 crc kubenswrapper[4703]: E0130 11:57:36.086097 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:57:36 crc kubenswrapper[4703]: E0130 11:57:36.086270 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:57:36 crc kubenswrapper[4703]: E0130 11:57:36.086371 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.101921 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.144946 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.144996 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.145014 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.145040 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.145061 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:36Z","lastTransitionTime":"2026-01-30T11:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.234746 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 01:58:42.91549809 +0000 UTC Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.248308 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.248417 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.248437 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.248462 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.248482 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:36Z","lastTransitionTime":"2026-01-30T11:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.350450 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.350487 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.350498 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.350514 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.350523 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:36Z","lastTransitionTime":"2026-01-30T11:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.453421 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.453467 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.453482 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.453501 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.453514 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:36Z","lastTransitionTime":"2026-01-30T11:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.556358 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.556399 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.556411 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.556440 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.556459 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:36Z","lastTransitionTime":"2026-01-30T11:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.658908 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.658968 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.658984 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.659007 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.659022 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:36Z","lastTransitionTime":"2026-01-30T11:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.761791 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.761838 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.761855 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.761878 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.761892 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:36Z","lastTransitionTime":"2026-01-30T11:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.864332 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.864381 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.864393 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.864412 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.864426 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:36Z","lastTransitionTime":"2026-01-30T11:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.966875 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.966915 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.966923 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.966937 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:36 crc kubenswrapper[4703]: I0130 11:57:36.966947 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:36Z","lastTransitionTime":"2026-01-30T11:57:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.069683 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.069743 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.069753 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.069770 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.069779 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:37Z","lastTransitionTime":"2026-01-30T11:57:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.086137 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:37 crc kubenswrapper[4703]: E0130 11:57:37.086301 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.086832 4703 scope.go:117] "RemoveContainer" containerID="13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.172343 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.172626 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.172640 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.172689 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.172702 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:37Z","lastTransitionTime":"2026-01-30T11:57:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.235880 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 20:36:04.336305368 +0000 UTC Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.274873 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.275158 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.275276 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.275389 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.275497 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:37Z","lastTransitionTime":"2026-01-30T11:57:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.378241 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.378272 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.378281 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.378296 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.378305 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:37Z","lastTransitionTime":"2026-01-30T11:57:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.481192 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.481231 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.481240 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.481255 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.481267 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:37Z","lastTransitionTime":"2026-01-30T11:57:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.585076 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.585148 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.585164 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.585184 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.585196 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:37Z","lastTransitionTime":"2026-01-30T11:57:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.687229 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.687290 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.687310 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.687330 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.687344 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:37Z","lastTransitionTime":"2026-01-30T11:57:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.791480 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.791578 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.791598 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.791625 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.791650 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:37Z","lastTransitionTime":"2026-01-30T11:57:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.894843 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.894889 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.894900 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.894918 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:37 crc kubenswrapper[4703]: I0130 11:57:37.894929 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:37Z","lastTransitionTime":"2026-01-30T11:57:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.009111 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.009186 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.009199 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.009219 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.009272 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:38Z","lastTransitionTime":"2026-01-30T11:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.063901 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovnkube-controller/2.log" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.066706 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerStarted","Data":"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"} Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.067668 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.083771 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4c34a2e-afef-43fa-af2f-24fba2afd001\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4cdadd36f3e4479c6957f86bd28947800e8c499b7a0990f3e303e7970b2d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://321c3cb914c8dc421b62be78bc5ad4fac126d2a68c2b17510a0fcf11fc84e0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://321c3cb914c8dc421b62be78bc5ad4fac126d2a68c2b17510a0fcf11fc84e0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.085870 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.085884 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:57:38 crc kubenswrapper[4703]: E0130 11:57:38.086022 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.085884 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:57:38 crc kubenswrapper[4703]: E0130 11:57:38.086213 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:57:38 crc kubenswrapper[4703]: E0130 11:57:38.086279 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.101705 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.123674 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.123726 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.123738 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.123757 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.123769 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:38Z","lastTransitionTime":"2026-01-30T11:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.126744 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.144037 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.157294 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.170444 4703 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a60343
7dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.183203 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.194665 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.207370 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.222068 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.226849 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.226889 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.226897 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.226913 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.226923 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:38Z","lastTransitionTime":"2026-01-30T11:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.234054 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.236181 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 23:04:07.733892691 +0000 UTC Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.256733 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"nam
e\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:09Z\\\",\\\"message\\\":\\\"try.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-l8kf2\\\\nI0130 11:57:09.853692 6396 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-cx2rm in node crc\\\\nF0130 11:57:09.853684 6396 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z]\\\\nI0130 11:57:09.853699 6396 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-cx2rm after 0 failed 
attempt(s)\\\\nI0130 11:57:09.853704 6396 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/mac\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:57:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:57:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disab
led\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.268482 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.280692 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33ae9b16-ffdb-4338-ba98-8da799fa7591\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb1699595c372168389cb480f8c41f41a23f856d321138a04599628f1d4e19cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdf1201e129d6158cd5ad6dfb3e93f5ec2a5e75c738edd2dc3bd197e813d6ac5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39eca81eb300b1b4d7a68731db92f76c91270b0bf49f7ae9bcf9643559bcb722\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.300353 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e547aae0-afa8-4515-a56d-d9632829c0bd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7a3c6cecb06c3b1c48c12feaf2c5afb8df7b18a2bdf2749ebaac2c4398952a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2abd05b4fe2583b0862c3a746dfe5f395f579562e27fb2c5a5b9e45f15683b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d969f6a449492f80217365d4fef8a9e7c302491081301eac57b30ed0ea6ac65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cfbb3957994fbace415cea1e8381c0ec9ab6c036fe32768c63e957635694ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c96c2d502863efd645a409e26825cee60ad08f0993460e694008488a07f1cb06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e4862890012338bc9c73abe374c27de5319127544543a7a7bb6af992c287612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be
8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e4862890012338bc9c73abe374c27de5319127544543a7a7bb6af992c287612\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5c24e73e0378455aef55023c1da4bc5434e9c613367b6ee81854001c56e070\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c5c24e73e0378455aef55023c1da4bc5434e9c613367b6ee81854001c56e070\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b8073aa1ab6fa6059ef8e0166bd98c5093e50995057da89705fbe84930521dac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8073aa1ab6fa6059ef8e0166bd98c5093e50995057da89705fbe84930521dac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.329374 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.329419 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.329432 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.329465 4703 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.329477 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:38Z","lastTransitionTime":"2026-01-30T11:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.330547 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.341777 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.355818 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af38bdb6fc351ea256fbb5a368b87ad70202821a6a348e230d540c60694cc014\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:25Z\\\",\\\"message\\\":\\\"2026-01-30T11:56:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_75c06f9f-7039-423a-ae70-a5ed3bff69c2\\\\n2026-01-30T11:56:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_75c06f9f-7039-423a-ae70-a5ed3bff69c2 to /host/opt/cni/bin/\\\\n2026-01-30T11:56:40Z [verbose] multus-daemon started\\\\n2026-01-30T11:56:40Z [verbose] Readiness Indicator file check\\\\n2026-01-30T11:57:25Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:57:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.368937 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:38Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.432697 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.432750 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.432761 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.432778 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.432787 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:38Z","lastTransitionTime":"2026-01-30T11:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.535390 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.535427 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.535439 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.535456 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.535467 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:38Z","lastTransitionTime":"2026-01-30T11:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.637877 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.637917 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.637925 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.637941 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.637950 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:38Z","lastTransitionTime":"2026-01-30T11:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.739831 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.739870 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.739879 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.739895 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.739906 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:38Z","lastTransitionTime":"2026-01-30T11:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.842564 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.842610 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.842618 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.842634 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.842645 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:38Z","lastTransitionTime":"2026-01-30T11:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.945086 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.945145 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.945157 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.945174 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:38 crc kubenswrapper[4703]: I0130 11:57:38.945183 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:38Z","lastTransitionTime":"2026-01-30T11:57:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.047507 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.047556 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.047565 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.047583 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.047594 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:39Z","lastTransitionTime":"2026-01-30T11:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.085657 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:57:39 crc kubenswrapper[4703]: E0130 11:57:39.085795 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.149001 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.149049 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.149063 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.149082 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.149096 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:39Z","lastTransitionTime":"2026-01-30T11:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.237102 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 14:33:49.30657027 +0000 UTC
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.251749 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.251947 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.252023 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.252106 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.252186 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:39Z","lastTransitionTime":"2026-01-30T11:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.354910 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.354945 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.354953 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.354968 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.354978 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:39Z","lastTransitionTime":"2026-01-30T11:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.457845 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.457881 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.457893 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.457909 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.457920 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:39Z","lastTransitionTime":"2026-01-30T11:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.561533 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.561608 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.561655 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.561739 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.561759 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:39Z","lastTransitionTime":"2026-01-30T11:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.663538 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.663828 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.663939 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.664088 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.664222 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:39Z","lastTransitionTime":"2026-01-30T11:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.766833 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.766918 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.766930 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.766958 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.766972 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:39Z","lastTransitionTime":"2026-01-30T11:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.869902 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.870217 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.870336 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.870497 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.870811 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:39Z","lastTransitionTime":"2026-01-30T11:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.988499 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.988550 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.988562 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.988584 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:39 crc kubenswrapper[4703]: I0130 11:57:39.988597 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:39Z","lastTransitionTime":"2026-01-30T11:57:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.086315 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.086350 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.086387 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:40 crc kubenswrapper[4703]: E0130 11:57:40.086507 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:40 crc kubenswrapper[4703]: E0130 11:57:40.086608 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:57:40 crc kubenswrapper[4703]: E0130 11:57:40.086714 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.091159 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.091246 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.091263 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.091284 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.091298 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:40Z","lastTransitionTime":"2026-01-30T11:57:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.152700 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovnkube-controller/3.log" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.153893 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovnkube-controller/2.log" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.157202 4703 generic.go:334] "Generic (PLEG): container finished" podID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerID="ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f" exitCode=1 Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.157292 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerDied","Data":"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"} Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.157403 4703 scope.go:117] "RemoveContainer" containerID="13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.158502 4703 scope.go:117] "RemoveContainer" containerID="ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f" Jan 30 11:57:40 crc kubenswrapper[4703]: E0130 11:57:40.158752 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.232555 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.232600 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.232610 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.232627 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.232640 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:40Z","lastTransitionTime":"2026-01-30T11:57:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.238598 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 07:22:27.637581654 +0000 UTC Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.239488 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secre
ts/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exit
Code\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.264044 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.279922 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastStat
e\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.294104 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.308932 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.320138 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.335271 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.335317 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.335334 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.335352 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.335362 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:40Z","lastTransitionTime":"2026-01-30T11:57:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.337182 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:09Z\\\",\\\"message\\\":\\\"try.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-l8kf2\\\\nI0130 11:57:09.853692 6396 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-cx2rm in node crc\\\\nF0130 11:57:09.853684 6396 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z]\\\\nI0130 11:57:09.853699 6396 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-cx2rm after 0 failed attempt(s)\\\\nI0130 11:57:09.853704 6396 default_network_controller.go:776] Recording success event on pod 
openshift-machine-config-operator/mac\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:57:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:39Z\\\",\\\"message\\\":\\\"ner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/catalog-operator-metrics\\\\\\\"}\\\\nI0130 11:57:39.362887 6751 services_controller.go:360] Finished syncing service catalog-operator-metrics on namespace openshift-operator-lifecycle-manager for network=default : 2.581886ms\\\\nI0130 11:57:39.363073 6751 address_set.go:302] New(0d39bc5c-d5b9-432c-81be-2275bce5d7aa/default-network-controller:EgressIP:node-ips:v4:default/a712973235162149816) with []\\\\nI0130 11:57:39.363094 6751 address_set.go:302] New(aa6fc2dc-fab0-4812-b9da-809058e4dcf7/default-network-controller:EgressIP:egressip-served-pods:v4:default/a8519615025667110816) with []\\\\nI0130 11:57:39.363110 6751 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI0130 11:57:39.363175 6751 factory.go:1336] Added *v1.Node event handler 7\\\\nI0130 11:57:39.363238 6751 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI0130 11:57:39.363596 6751 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0130 11:57:39.363690 6751 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0130 11:57:39.363760 6751 ovnkube.go:599] Stopped ovnkube\\\\nI0130 11:57:39.363814 6751 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0130 11:57:39.363886 6751 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:57:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd4
7ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.348232 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.360883 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af38bdb6fc351ea256fbb5a368b87ad70202821a6a348e230d540c60694cc014\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:25Z\\\",\\\"message\\\":\\\"2026-01-30T11:56:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_75c06f9f-7039-423a-ae70-a5ed3bff69c2\\\\n2026-01-30T11:56:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_75c06f9f-7039-423a-ae70-a5ed3bff69c2 to /host/opt/cni/bin/\\\\n2026-01-30T11:56:40Z [verbose] multus-daemon started\\\\n2026-01-30T11:56:40Z [verbose] Readiness Indicator file check\\\\n2026-01-30T11:57:25Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:57:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.371373 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.381876 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33ae9b16-ffdb-4338-ba98-8da799fa7591\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb1699595c372168389cb480f8c41f41a23f856d321138a04599628f1d4e19cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdf1201e129d6158cd5ad6dfb3e93f5ec2a5e75c738edd2dc3bd197e813d6ac5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39eca81eb300b1b4d7a68731db92f76c91270b0bf49f7ae9bcf9643559bcb722\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.401251 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e547aae0-afa8-4515-a56d-d9632829c0bd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7a3c6cecb06c3b1c48c12feaf2c5afb8df7b18a2bdf2749ebaac2c4398952a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2abd05b4fe2583b0862c3a746dfe5f395f579562e27fb2c5a5b9e45f15683b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d969f6a449492f80217365d4fef8a9e7c302491081301eac57b30ed0ea6ac65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cfbb3957994fbace415cea1e8381c0ec9ab6c036fe32768c63e957635694ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c96c2d502863efd645a409e26825cee60ad08f0993460e694008488a07f1cb06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e4862890012338bc9c73abe374c27de5319127544543a7a7bb6af992c287612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be
8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e4862890012338bc9c73abe374c27de5319127544543a7a7bb6af992c287612\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5c24e73e0378455aef55023c1da4bc5434e9c613367b6ee81854001c56e070\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c5c24e73e0378455aef55023c1da4bc5434e9c613367b6ee81854001c56e070\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b8073aa1ab6fa6059ef8e0166bd98c5093e50995057da89705fbe84930521dac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8073aa1ab6fa6059ef8e0166bd98c5093e50995057da89705fbe84930521dac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.413600 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.423991 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.434330 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.437024 4703 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.437062 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.437076 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.437092 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.437103 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:40Z","lastTransitionTime":"2026-01-30T11:57:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.445581 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4c34a2e-afef-43fa-af2f-24fba2afd001\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4cdadd36f3e4479c6957f86bd28947800e8c499b7a0990f3e303e7970b2d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://321c3cb914c8dc421b62be78bc5ad4fac126d2a68c2b17510a0fcf11fc84e0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCo
unt\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://321c3cb914c8dc421b62be78bc5ad4fac126d2a68c2b17510a0fcf11fc84e0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.459363 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.473711 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.485375 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:40Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.540179 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.540281 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.540293 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.540307 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.540316 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:40Z","lastTransitionTime":"2026-01-30T11:57:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.642204 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.642262 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.642273 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.642286 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.642296 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:40Z","lastTransitionTime":"2026-01-30T11:57:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.745181 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.745245 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.745260 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.745279 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.745291 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:40Z","lastTransitionTime":"2026-01-30T11:57:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.847898 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.847954 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.847969 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.847990 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.848003 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:40Z","lastTransitionTime":"2026-01-30T11:57:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.951382 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.951426 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.951435 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.951453 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:40 crc kubenswrapper[4703]: I0130 11:57:40.951465 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:40Z","lastTransitionTime":"2026-01-30T11:57:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.054256 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.054326 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.054340 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.054363 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.054377 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:41Z","lastTransitionTime":"2026-01-30T11:57:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.085919 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:41 crc kubenswrapper[4703]: E0130 11:57:41.086100 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.156808 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.156852 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.156863 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.156942 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.156953 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:41Z","lastTransitionTime":"2026-01-30T11:57:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.162003 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovnkube-controller/3.log" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.239183 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 19:08:40.921243911 +0000 UTC Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.259914 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.259958 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.259968 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.259984 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.259994 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:41Z","lastTransitionTime":"2026-01-30T11:57:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.361998 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.362043 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.362056 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.362075 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.362090 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:41Z","lastTransitionTime":"2026-01-30T11:57:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.464433 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.464492 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.464505 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.464526 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.464538 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:41Z","lastTransitionTime":"2026-01-30T11:57:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.567103 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.567184 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.567200 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.567221 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.567235 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:41Z","lastTransitionTime":"2026-01-30T11:57:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.669679 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.669723 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.669737 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.669760 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.669774 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:41Z","lastTransitionTime":"2026-01-30T11:57:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.772680 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.772742 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.772755 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.772774 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.772785 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:41Z","lastTransitionTime":"2026-01-30T11:57:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.876093 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.876160 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.876170 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.876185 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.876199 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:41Z","lastTransitionTime":"2026-01-30T11:57:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.980070 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.980109 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.980138 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.980156 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:41 crc kubenswrapper[4703]: I0130 11:57:41.980168 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:41Z","lastTransitionTime":"2026-01-30T11:57:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.082779 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.082816 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.082824 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.082838 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.082847 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:42Z","lastTransitionTime":"2026-01-30T11:57:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.086337 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:42 crc kubenswrapper[4703]: E0130 11:57:42.086538 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.086797 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:42 crc kubenswrapper[4703]: E0130 11:57:42.086929 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.087081 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:42 crc kubenswrapper[4703]: E0130 11:57:42.087251 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.186489 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.186555 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.186573 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.186598 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.186618 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:42Z","lastTransitionTime":"2026-01-30T11:57:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.239985 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-27 06:14:45.384300165 +0000 UTC Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.290506 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.290948 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.291253 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.291490 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.291994 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:42Z","lastTransitionTime":"2026-01-30T11:57:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.394939 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.395431 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.395623 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.395806 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.395966 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:42Z","lastTransitionTime":"2026-01-30T11:57:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.499661 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.499727 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.499750 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.499780 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.499802 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:42Z","lastTransitionTime":"2026-01-30T11:57:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.603483 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.603575 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.603594 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.603620 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.603637 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:42Z","lastTransitionTime":"2026-01-30T11:57:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.706723 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.706775 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.706788 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.706808 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.706821 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:42Z","lastTransitionTime":"2026-01-30T11:57:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.810654 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.810711 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.810727 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.810752 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.810769 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:42Z","lastTransitionTime":"2026-01-30T11:57:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.913669 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.913713 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.913754 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.913771 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:42 crc kubenswrapper[4703]: I0130 11:57:42.913781 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:42Z","lastTransitionTime":"2026-01-30T11:57:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.016502 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.016576 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.016590 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.016609 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.016624 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:43Z","lastTransitionTime":"2026-01-30T11:57:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.085766 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:43 crc kubenswrapper[4703]: E0130 11:57:43.086007 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.120267 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.120326 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.120344 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.120370 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.120389 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:43Z","lastTransitionTime":"2026-01-30T11:57:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.223355 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.223392 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.223416 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.223440 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.223453 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:43Z","lastTransitionTime":"2026-01-30T11:57:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.242098 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 19:14:53.532122982 +0000 UTC Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.327044 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.327346 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.327386 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.327420 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.327438 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:43Z","lastTransitionTime":"2026-01-30T11:57:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.430819 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.430875 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.430890 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.431163 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.431176 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:43Z","lastTransitionTime":"2026-01-30T11:57:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.534607 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.534674 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.534700 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.534732 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.534764 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:43Z","lastTransitionTime":"2026-01-30T11:57:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.638228 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.638529 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.638625 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.638722 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.638838 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:43Z","lastTransitionTime":"2026-01-30T11:57:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.741145 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.741186 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.741196 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.741214 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.741224 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:43Z","lastTransitionTime":"2026-01-30T11:57:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.844803 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.845132 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.845140 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.845156 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.845165 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:43Z","lastTransitionTime":"2026-01-30T11:57:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.948224 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.948283 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.948308 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.948334 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:43 crc kubenswrapper[4703]: I0130 11:57:43.948350 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:43Z","lastTransitionTime":"2026-01-30T11:57:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.046253 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.046566 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.046666 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.046868 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.046977 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:44Z","lastTransitionTime":"2026-01-30T11:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:44 crc kubenswrapper[4703]: E0130 11:57:44.072149 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:44Z is after 
2025-08-24T17:21:41Z" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.078234 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.078270 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.078282 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.078299 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.078310 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:44Z","lastTransitionTime":"2026-01-30T11:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.085588 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.085786 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.085928 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:44 crc kubenswrapper[4703]: E0130 11:57:44.086148 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:57:44 crc kubenswrapper[4703]: E0130 11:57:44.086479 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:44 crc kubenswrapper[4703]: E0130 11:57:44.086631 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:57:44 crc kubenswrapper[4703]: E0130 11:57:44.093643 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:44Z is after 
2025-08-24T17:21:41Z" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.098628 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.098842 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.098907 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.098973 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.099042 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:44Z","lastTransitionTime":"2026-01-30T11:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:44 crc kubenswrapper[4703]: E0130 11:57:44.114382 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:44Z is after 
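
Note: every failed status patch in this stretch of the log fails for the same root cause: the admission webhook node.network-node-identity.openshift.io at 127.0.0.1:9743 serves a certificate that expired on 2025-08-24T17:21:41Z, while the node clock reads 2026-01-30. A minimal diagnostic sketch in Go (standard library only; illustrative, not part of the log or of the kubelet) that dials that endpoint and prints the certificate's validity window:

package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// Skip verification so we can fetch the certificate at all --
	// verification is exactly what fails in the log entries above.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	now := time.Now().UTC()
	fmt.Println("NotBefore:", cert.NotBefore.Format(time.RFC3339))
	fmt.Println("NotAfter: ", cert.NotAfter.Format(time.RFC3339))
	fmt.Println("expired:  ", now.After(cert.NotAfter)) // true on this node

}
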
2025-08-24T17:21:41Z" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.118731 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.118769 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.118781 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.118802 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.118817 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:44Z","lastTransitionTime":"2026-01-30T11:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:44 crc kubenswrapper[4703]: E0130 11:57:44.132074 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:44Z is after 
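
Note: the recurring NodeNotReady condition is independent of the webhook failure: the container runtime reports NetworkReady=false until a CNI network config appears in /etc/kubernetes/cni/net.d/. A rough Go illustration of that directory check (the glob patterns and exit behavior are assumptions for the sketch, not the runtime's actual implementation; only the path comes from the log):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log message
	var confs []string
	for _, pattern := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(confDir, pattern))
		if err == nil {
			confs = append(confs, matches...)
		}
	}
	if len(confs) == 0 {
		// The state this log is stuck in: nothing for the runtime to load,
		// so NetworkReady stays false and the node stays NotReady.
		fmt.Println("no CNI configuration file in", confDir)
		os.Exit(1)
	}
	fmt.Println("found CNI configs:", confs)
}
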
2025-08-24T17:21:41Z" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.137500 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.137562 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.137579 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.137601 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.137617 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:44Z","lastTransitionTime":"2026-01-30T11:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:44 crc kubenswrapper[4703]: E0130 11:57:44.153715 4703 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"0ee38ffa-0913-4f86-a89a-42f0682f685a\\\",\\\"systemUUID\\\":\\\"4234120e-d9fe-419a-b814-ae76a780f4ec\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:44Z is after 
2025-08-24T17:21:41Z" Jan 30 11:57:44 crc kubenswrapper[4703]: E0130 11:57:44.153870 4703 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.155769 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.155801 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.155809 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.155824 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.155834 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:44Z","lastTransitionTime":"2026-01-30T11:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.243064 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 10:04:27.004841936 +0000 UTC Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.258036 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.258405 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.258537 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.258731 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.259016 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:44Z","lastTransitionTime":"2026-01-30T11:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.361710 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.361754 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.361764 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.361784 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.361796 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:44Z","lastTransitionTime":"2026-01-30T11:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.464404 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.464448 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.464460 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.464478 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.464505 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:44Z","lastTransitionTime":"2026-01-30T11:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.567731 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.567808 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.567835 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.567867 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.567890 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:44Z","lastTransitionTime":"2026-01-30T11:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.670451 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.670493 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.670504 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.670521 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.670530 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:44Z","lastTransitionTime":"2026-01-30T11:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.773392 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.773437 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.773449 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.773467 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.773490 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:44Z","lastTransitionTime":"2026-01-30T11:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.876271 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.876311 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.876322 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.876338 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.876350 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:44Z","lastTransitionTime":"2026-01-30T11:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.978460 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.978771 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.978845 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.978908 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:44 crc kubenswrapper[4703]: I0130 11:57:44.978977 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:44Z","lastTransitionTime":"2026-01-30T11:57:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.082197 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.082461 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.082545 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.082650 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.082721 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:45Z","lastTransitionTime":"2026-01-30T11:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.085996 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:45 crc kubenswrapper[4703]: E0130 11:57:45.086241 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.105200 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static
-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.120455 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.137487 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.153699 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.170407 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z"
Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.185476 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.185542 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.185557 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.185580 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.185593 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:45Z","lastTransitionTime":"2026-01-30T11:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.187020 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.219323 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a
4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13604acff234530c9f9c840854b0561cc60cf1ce5f1fb76c1392210609442c53\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:09Z\\\",\\\"message\\\":\\\"try.go:303] Retry object setup: *v1.Pod openshift-dns/node-resolver-l8kf2\\\\nI0130 11:57:09.853692 6396 ovn.go:134] Ensuring zone local for Pod openshift-machine-config-operator/machine-config-daemon-cx2rm in node crc\\\\nF0130 11:57:09.853684 6396 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:09Z is after 2025-08-24T17:21:41Z]\\\\nI0130 11:57:09.853699 6396 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-cx2rm after 0 failed attempt(s)\\\\nI0130 11:57:09.853704 6396 default_network_controller.go:776] Recording success event on pod 
openshift-machine-config-operator/mac\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:57:08Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:39Z\\\",\\\"message\\\":\\\"ner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/catalog-operator-metrics\\\\\\\"}\\\\nI0130 11:57:39.362887 6751 services_controller.go:360] Finished syncing service catalog-operator-metrics on namespace openshift-operator-lifecycle-manager for network=default : 2.581886ms\\\\nI0130 11:57:39.363073 6751 address_set.go:302] New(0d39bc5c-d5b9-432c-81be-2275bce5d7aa/default-network-controller:EgressIP:node-ips:v4:default/a712973235162149816) with []\\\\nI0130 11:57:39.363094 6751 address_set.go:302] New(aa6fc2dc-fab0-4812-b9da-809058e4dcf7/default-network-controller:EgressIP:egressip-served-pods:v4:default/a8519615025667110816) with []\\\\nI0130 11:57:39.363110 6751 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI0130 11:57:39.363175 6751 factory.go:1336] Added *v1.Node event handler 7\\\\nI0130 11:57:39.363238 6751 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI0130 11:57:39.363596 6751 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0130 11:57:39.363690 6751 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0130 11:57:39.363760 6751 ovnkube.go:599] Stopped ovnkube\\\\nI0130 11:57:39.363814 6751 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0130 11:57:39.363886 6751 
ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:57:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd4
7ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.233953 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.244702 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 02:01:04.713667739 +0000 UTC Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.250990 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33ae9b16-ffdb-4338-ba98-8da799fa7591\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb1699595c372168389cb480f8c41f41a23f856d321138a04599628f1d4e19cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdf1201e129d6158cd5ad6dfb3e93f5ec2a5e75c738edd2dc3bd197e813d6ac5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39eca81eb300b1b4d7a68731db92f76c91270b0bf49f7ae9bcf9643559bcb722\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.274047 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e547aae0-afa8-4515-a56d-d9632829c0bd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7a3c6cecb06c3b1c48c12feaf2c5afb8df7b18a2bdf2749ebaac2c4398952a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2abd05b4fe2583b0862c3a746dfe5f395f579562e27fb2c5a5b9e45f15683b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d969f6a449492f80217365d4fef8a9e7c302491081301eac57b30ed0ea6ac65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cfbb3957994fbace415cea1e8381c0ec9ab6c036fe32768c63e957635694ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c96c2d502863efd645a409e26825cee60ad08f0993460e694008488a07f1cb06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e4862890012338bc9c73abe374c27de5319127544543a7a7bb6af992c287612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be
8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e4862890012338bc9c73abe374c27de5319127544543a7a7bb6af992c287612\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5c24e73e0378455aef55023c1da4bc5434e9c613367b6ee81854001c56e070\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c5c24e73e0378455aef55023c1da4bc5434e9c613367b6ee81854001c56e070\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b8073aa1ab6fa6059ef8e0166bd98c5093e50995057da89705fbe84930521dac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8073aa1ab6fa6059ef8e0166bd98c5093e50995057da89705fbe84930521dac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.288900 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.289289 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.289328 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.289344 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.289364 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.289375 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:45Z","lastTransitionTime":"2026-01-30T11:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.300045 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.314880 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af38bdb6fc351ea256fbb5a368b87ad70202821a6a348e230d540c60694cc014\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:25Z\\\",\\\"message\\\":\\\"2026-01-30T11:56:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_75c06f9f-7039-423a-ae70-a5ed3bff69c2\\\\n2026-01-30T11:56:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_75c06f9f-7039-423a-ae70-a5ed3bff69c2 to /host/opt/cni/bin/\\\\n2026-01-30T11:56:40Z [verbose] multus-daemon started\\\\n2026-01-30T11:56:40Z [verbose] Readiness Indicator file check\\\\n2026-01-30T11:57:25Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:57:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.327772 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.340215 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4c34a2e-afef-43fa-af2f-24fba2afd001\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4cdadd36f3e4479c6957f86bd28947800e8c499b7a0990f3e303e7970b2d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://321c3cb914c8dc421b62be78bc5ad4fac126d2a68c2b17510a0fcf11fc84e0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://321c3cb914c8dc421b62be78bc5ad4fac126d2a68c2b17510a0fcf11fc84e0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.357824 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.375378 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.392260 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.393184 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.393914 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.394157 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.394368 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.394545 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:45Z","lastTransitionTime":"2026-01-30T11:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.405181 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:45Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.497973 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.498466 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.498570 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.498671 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.498757 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:45Z","lastTransitionTime":"2026-01-30T11:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.601597 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.601652 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.601673 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.601703 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.601723 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:45Z","lastTransitionTime":"2026-01-30T11:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.703900 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.704310 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.704343 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.704367 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.704380 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:45Z","lastTransitionTime":"2026-01-30T11:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.807826 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.807905 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.807930 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.807960 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.807981 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:45Z","lastTransitionTime":"2026-01-30T11:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.910654 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.910716 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.910735 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.910761 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:45 crc kubenswrapper[4703]: I0130 11:57:45.910778 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:45Z","lastTransitionTime":"2026-01-30T11:57:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.013712 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.013760 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.013778 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.013809 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.013830 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:46Z","lastTransitionTime":"2026-01-30T11:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.085590 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.085641 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:46 crc kubenswrapper[4703]: E0130 11:57:46.085723 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.085599 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:46 crc kubenswrapper[4703]: E0130 11:57:46.085873 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:57:46 crc kubenswrapper[4703]: E0130 11:57:46.085912 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.116726 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.116783 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.116799 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.116824 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.116843 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:46Z","lastTransitionTime":"2026-01-30T11:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.219104 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.219207 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.219224 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.219245 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.219259 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:46Z","lastTransitionTime":"2026-01-30T11:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.245592 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 10:39:38.468482012 +0000 UTC Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.322148 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.322194 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.322204 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.322220 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.322232 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:46Z","lastTransitionTime":"2026-01-30T11:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.425525 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.425560 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.425572 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.425589 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.425602 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:46Z","lastTransitionTime":"2026-01-30T11:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.528875 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.528929 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.528946 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.528971 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.528988 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:46Z","lastTransitionTime":"2026-01-30T11:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.631518 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.631581 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.631593 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.631612 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.631621 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:46Z","lastTransitionTime":"2026-01-30T11:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.734311 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.734355 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.734368 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.734387 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.734399 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:46Z","lastTransitionTime":"2026-01-30T11:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.838056 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.838141 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.838163 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.838184 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.838198 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:46Z","lastTransitionTime":"2026-01-30T11:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.941183 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.941214 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.941230 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.941247 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:46 crc kubenswrapper[4703]: I0130 11:57:46.941258 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:46Z","lastTransitionTime":"2026-01-30T11:57:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.043385 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.043439 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.043455 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.043476 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.043490 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:47Z","lastTransitionTime":"2026-01-30T11:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.085916 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:47 crc kubenswrapper[4703]: E0130 11:57:47.086091 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.145430 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.145478 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.145501 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.145525 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.145539 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:47Z","lastTransitionTime":"2026-01-30T11:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.246448 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 19:24:26.026822142 +0000 UTC Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.248062 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.248109 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.248145 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.248165 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.248177 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:47Z","lastTransitionTime":"2026-01-30T11:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.351252 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.351320 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.351346 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.351379 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.351401 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:47Z","lastTransitionTime":"2026-01-30T11:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.454515 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.454559 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.454574 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.454591 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.454603 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:47Z","lastTransitionTime":"2026-01-30T11:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.557666 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.557721 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.557734 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.557753 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.557768 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:47Z","lastTransitionTime":"2026-01-30T11:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.660897 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.660971 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.660996 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.661026 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.661049 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:47Z","lastTransitionTime":"2026-01-30T11:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.764764 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.764843 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.764867 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.764901 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.764925 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:47Z","lastTransitionTime":"2026-01-30T11:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.868396 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.868452 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.868463 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.868485 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.868498 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:47Z","lastTransitionTime":"2026-01-30T11:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.972587 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.972658 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.972672 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.972691 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:47 crc kubenswrapper[4703]: I0130 11:57:47.972703 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:47Z","lastTransitionTime":"2026-01-30T11:57:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.076161 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.076260 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.076277 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.076341 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.076368 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:48Z","lastTransitionTime":"2026-01-30T11:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.085440 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.085488 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.085503 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:48 crc kubenswrapper[4703]: E0130 11:57:48.085661 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:57:48 crc kubenswrapper[4703]: E0130 11:57:48.085802 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:57:48 crc kubenswrapper[4703]: E0130 11:57:48.086000 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.179277 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.179513 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.179533 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.179550 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.179560 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:48Z","lastTransitionTime":"2026-01-30T11:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.247461 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 10:25:52.183026707 +0000 UTC Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.282471 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.282515 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.282528 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.282547 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.282560 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:48Z","lastTransitionTime":"2026-01-30T11:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.385272 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.385343 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.385353 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.385377 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.385393 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:48Z","lastTransitionTime":"2026-01-30T11:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.489309 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.489395 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.489408 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.489430 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.489443 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:48Z","lastTransitionTime":"2026-01-30T11:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.592624 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.592681 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.592692 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.592715 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.592730 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:48Z","lastTransitionTime":"2026-01-30T11:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.696004 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.696067 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.696081 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.696104 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.696135 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:48Z","lastTransitionTime":"2026-01-30T11:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.799357 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.799400 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.799411 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.799426 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.799436 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:48Z","lastTransitionTime":"2026-01-30T11:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.902377 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.902425 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.902436 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.902459 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:48 crc kubenswrapper[4703]: I0130 11:57:48.902472 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:48Z","lastTransitionTime":"2026-01-30T11:57:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.005537 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.005622 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.005641 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.005665 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.005683 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:49Z","lastTransitionTime":"2026-01-30T11:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.085685 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:49 crc kubenswrapper[4703]: E0130 11:57:49.086029 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.110651 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.110764 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.110789 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.110812 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.110822 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:49Z","lastTransitionTime":"2026-01-30T11:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.213391 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.213466 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.213495 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.213532 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.213556 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:49Z","lastTransitionTime":"2026-01-30T11:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.248451 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 14:24:55.541421206 +0000 UTC Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.317340 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.317382 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.317390 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.317407 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.317437 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:49Z","lastTransitionTime":"2026-01-30T11:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.420005 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.420081 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.420101 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.420168 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.420191 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:49Z","lastTransitionTime":"2026-01-30T11:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.523288 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.523328 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.523338 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.523355 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.523366 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:49Z","lastTransitionTime":"2026-01-30T11:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.625714 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.625771 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.625781 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.625797 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.625808 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:49Z","lastTransitionTime":"2026-01-30T11:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.729794 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.729837 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.729849 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.729868 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.729881 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:49Z","lastTransitionTime":"2026-01-30T11:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.832320 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.832375 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.832386 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.832403 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.832413 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:49Z","lastTransitionTime":"2026-01-30T11:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.934425 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.934461 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.934472 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.934487 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:49 crc kubenswrapper[4703]: I0130 11:57:49.934497 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:49Z","lastTransitionTime":"2026-01-30T11:57:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.037052 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.037115 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.037146 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.037165 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.037179 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:50Z","lastTransitionTime":"2026-01-30T11:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.085479 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.085476 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:50 crc kubenswrapper[4703]: E0130 11:57:50.085601 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.085491 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:50 crc kubenswrapper[4703]: E0130 11:57:50.085788 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:57:50 crc kubenswrapper[4703]: E0130 11:57:50.085835 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.139892 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.139976 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.139991 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.140013 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.140025 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:50Z","lastTransitionTime":"2026-01-30T11:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.247555 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.247613 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.247631 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.247653 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.247664 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:50Z","lastTransitionTime":"2026-01-30T11:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.248883 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 14:25:06.488562758 +0000 UTC Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.350412 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.350450 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.350462 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.350479 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.350491 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:50Z","lastTransitionTime":"2026-01-30T11:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.453070 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.453133 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.453144 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.453165 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.453176 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:50Z","lastTransitionTime":"2026-01-30T11:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.555566 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.555607 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.555616 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.555632 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.555645 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:50Z","lastTransitionTime":"2026-01-30T11:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.658710 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.658789 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.658803 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.658829 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.658846 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:50Z","lastTransitionTime":"2026-01-30T11:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.658846 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:50Z","lastTransitionTime":"2026-01-30T11:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.761717 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.761778 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.761788 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.761803 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.761812 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:50Z","lastTransitionTime":"2026-01-30T11:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.866808 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.866850 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.866862 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.866881 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.866893 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:50Z","lastTransitionTime":"2026-01-30T11:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.969182 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.969232 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.969244 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.969266 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
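Every line in this log follows the same two-layer format: a journald prefix (timestamp, host, process[pid]) wrapping a klog header (severity letter plus MMDD, wall-clock time, PID, source file:line) and then the structured message. A small parser sketch for pulling those fields apart; the regular expression is mine, inferred from the lines above rather than taken from any library:

    package main

    import (
    	"fmt"
    	"regexp"
    )

    // logLine captures: journald timestamp, host, process, journald PID,
    // klog severity, MMDD, klog time, klog PID, source file:line, message.
    var logLine = regexp.MustCompile(
    	`^(\w{3} \d+ \d{2}:\d{2}:\d{2}) (\S+) (\w+)\[(\d+)\]: ` +
    		`([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d+)\s+(\d+) ([\w./-]+:\d+)\] (.*)$`)

    func main() {
    	line := `Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.969266 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"`
    	m := logLine.FindStringSubmatch(line)
    	if m == nil {
    		fmt.Println("not a kubelet log line")
    		return
    	}
    	fmt.Printf("severity=%s source=%s msg=%s\n", m[5], m[9], m[10])
    }

Parsing on the source file:line field (setters.go:603, kubelet_node_status.go:724, pod_workers.go:1301, and so on) is the easiest way to bucket the repeated entries in this section.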
Jan 30 11:57:50 crc kubenswrapper[4703]: I0130 11:57:50.969280 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:50Z","lastTransitionTime":"2026-01-30T11:57:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.071957 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.071998 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.072008 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.072024 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.072035 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:51Z","lastTransitionTime":"2026-01-30T11:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.086398 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:57:51 crc kubenswrapper[4703]: E0130 11:57:51.086777 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.174489 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.174610 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.174619 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.174633 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
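The pod_workers.go:1301 entry above is the workload-side consequence of the unready network: pods that need the pod network are skipped with "network is not ready" until CNI comes up, while hostNetwork pods (such as the static etcd and kube-scheduler pods later in this log) are allowed through. A simplified sketch of that gate; the Pod type and canSyncPod function are illustrative stand-ins, not kubelet source:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // Pod is a pared-down stand-in for the Kubernetes pod spec.
    type Pod struct {
    	Name        string
    	HostNetwork bool
    }

    // errNetworkNotReady mirrors the error text recorded by pod_workers.go.
    var errNetworkNotReady = errors.New(
    	"network is not ready: container runtime network not ready: NetworkReady=false")

    // canSyncPod gates pod sync the way the kubelet does: hostNetwork pods may
    // proceed without CNI, everything else is skipped until the network is ready.
    func canSyncPod(p Pod, networkReady bool) error {
    	if !networkReady && !p.HostNetwork {
    		return fmt.Errorf("error syncing pod %q, skipping: %w", p.Name, errNetworkNotReady)
    	}
    	return nil
    }

    func main() {
    	fmt.Println(canSyncPod(Pod{Name: "networking-console-plugin", HostNetwork: false}, false))
    	fmt.Println(canSyncPod(Pod{Name: "etcd-crc", HostNetwork: true}, false)) // <nil>
    }

This is why the same handful of pods (networking-console-plugin, network-check-source, network-check-target, network-metrics-daemon) keep reappearing in these errors: they are the non-hostNetwork pods scheduled to this node.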
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.174643 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:51Z","lastTransitionTime":"2026-01-30T11:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.249393 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 21:13:18.265153056 +0000 UTC
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.278215 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.278285 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.278297 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.278317 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.278334 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:51Z","lastTransitionTime":"2026-01-30T11:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.381110 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.381209 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.381227 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.381254 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
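The certificate_manager.go:356 lines deserve a second look: the serving certificate is valid until 2026-02-24, yet the computed rotation deadline (2026-01-13 in the first entry, 2025-11-22 here) is already in the past on 2026-01-30, so rotation is overdue and the manager re-rolls a fresh jittered deadline on each pass. client-go's certificate manager picks the deadline at a randomized point late in the certificate's lifetime; a sketch of that idea, assuming a 70-90% window and a one-year lifetime, neither of which is printed in the log:

    package main

    import (
    	"fmt"
    	"math/rand"
    	"time"
    )

    // rotationDeadline picks a random instant at roughly 70-90% of the
    // certificate's validity period, mirroring the jitter client-go applies.
    // The exact window is an assumption for illustration.
    func rotationDeadline(notBefore, notAfter time.Time, rng *rand.Rand) time.Time {
    	total := notAfter.Sub(notBefore)
    	jittered := time.Duration(float64(total) * (0.7 + 0.2*rng.Float64()))
    	return notBefore.Add(jittered)
    }

    func main() {
    	rng := rand.New(rand.NewSource(1))
    	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC) // expiry from the log
    	notBefore := notAfter.AddDate(-1, 0, 0)                   // assumed one-year lifetime
    	deadline := rotationDeadline(notBefore, notAfter, rng)
    	fmt.Println("rotation deadline:", deadline)
    	now := time.Date(2026, 1, 30, 11, 57, 51, 0, time.UTC)
    	if now.After(deadline) {
    		fmt.Println("deadline already passed: rotate now")
    	}
    }

The changing deadline between successive entries (2026-01-13, then 2025-11-22, then 2025-11-21 further down) is consistent with the deadline being recomputed with fresh jitter on every evaluation while the rotation itself cannot complete.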
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.381272 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:51Z","lastTransitionTime":"2026-01-30T11:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.484841 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.484898 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.484908 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.484929 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.484941 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:51Z","lastTransitionTime":"2026-01-30T11:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.587361 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.587434 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.587452 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.587478 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.587497 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:51Z","lastTransitionTime":"2026-01-30T11:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.691099 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.691195 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.691214 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.691241 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.691260 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:51Z","lastTransitionTime":"2026-01-30T11:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.794977 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.795040 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.795059 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.795089 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.795114 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:51Z","lastTransitionTime":"2026-01-30T11:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.898737 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.898781 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.898794 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.898816 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:51 crc kubenswrapper[4703]: I0130 11:57:51.898827 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:51Z","lastTransitionTime":"2026-01-30T11:57:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.002585 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.002643 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.002657 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.002681 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.002705 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:52Z","lastTransitionTime":"2026-01-30T11:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.064640 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs\") pod \"network-metrics-daemon-qrt92\" (UID: \"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\") " pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:57:52 crc kubenswrapper[4703]: E0130 11:57:52.064806 4703 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 30 11:57:52 crc kubenswrapper[4703]: E0130 11:57:52.064864 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs podName:ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd nodeName:}" failed. No retries permitted until 2026-01-30 11:58:56.064846918 +0000 UTC m=+171.842668572 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs") pod "network-metrics-daemon-qrt92" (UID: "ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.085308 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:57:52 crc kubenswrapper[4703]: E0130 11:57:52.085448 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.085519 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:57:52 crc kubenswrapper[4703]: E0130 11:57:52.085573 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.086445 4703 scope.go:117] "RemoveContainer" containerID="ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"
Jan 30 11:57:52 crc kubenswrapper[4703]: E0130 11:57:52.086677 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc"
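Two separate back-offs are visible above. The volume manager refuses to retry the metrics-certs mount for 1m4s, which is the exponential series 500ms x 2^7 = 64s (doubling per failure from a 500ms base, capped at 2m2s), and the runtime holds ovnkube-controller in CrashLoopBackOff for 40s, two doublings of a 10s base (capped at 5m). The bases and caps are kubelet defaults I am assuming here, not values printed in the log. A sketch of the shared doubling-with-cap rule:

    package main

    import (
    	"fmt"
    	"time"
    )

    // backoff returns the delay before retry n (n=0 is the first retry),
    // doubling from base and saturating at limit. Both back-offs in the
    // log fit this shape; the constants below are assumed defaults.
    func backoff(base, limit time.Duration, n int) time.Duration {
    	d := base
    	for i := 0; i < n; i++ {
    		d *= 2
    		if d >= limit {
    			return limit
    		}
    	}
    	return d
    }

    func main() {
    	// Volume mount retries: 500ms base, 2m2s cap. 500ms*2^7 = 1m4s,
    	// matching "durationBeforeRetry 1m4s" for the metrics-certs volume.
    	fmt.Println(backoff(500*time.Millisecond, 2*time.Minute+2*time.Second, 7))
    	// Container restarts: 10s base, 5m cap. 10s*2^2 = 40s, matching
    	// "back-off 40s restarting failed container=ovnkube-controller".
    	fmt.Println(backoff(10*time.Second, 5*time.Minute, 2))
    }

The "not registered" secret error feeding the first back-off is itself a symptom of the same outage: the kubelet has not been able to (re)register the pod's object references, so the mount keeps failing and the retry delay keeps growing.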
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.086766 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:57:52 crc kubenswrapper[4703]: E0130 11:57:52.086855 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.109261 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-72zlj" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"874a5df5-f6bd-4111-aefa-f43e43e1fcc0\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:57:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://af38bdb6fc351ea256fbb5a368b87ad70202821a6a348e230d540c60694cc014\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:25Z\\\",\\\"message\\\":\\\"2026-01-30T11:56:39+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_75c06f9f-7039-423a-ae70-a5ed3bff69c2\\\\n2026-01-30T11:56:39+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_75c06f9f-7039-423a-ae70-a5ed3bff69c2 to /host/opt/cni/bin/\\\\n2026-01-30T11:56:40Z [verbose] multus-daemon started\\\\n2026-01-30T11:56:40Z [verbose] Readiness Indicator file check\\\\n2026-01-30T11:57:25Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:57:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52zng\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-72zlj\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.111304 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.111343 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.111359 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.111393 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.111410 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:52Z","lastTransitionTime":"2026-01-30T11:57:52Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.122688 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vzhfb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d5b3826b-50d0-4b94-bc51-42e5945b80ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c0bd07b3d314f68f07ed10089d8ec43556d4cfc2dba259fc6b01989778a7a075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4fxpq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vzhfb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.136732 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"33ae9b16-ffdb-4338-ba98-8da799fa7591\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bb1699595c372168389cb480f8c41f41a23f856d321138a04599628f1d4e19cd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bdf1201e129d6158cd5ad6dfb3e93f5ec2a5e75c738edd2dc3bd197e813d6ac5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://39eca81eb300b1b4d7a68731db92f76c91270b0bf49f7ae9bcf9643559bcb722\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://994c0a7b4893801aa46c57dd7938f28281368dca41f44ee26fa8f4fef3e74ab3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.158239 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"e547aae0-afa8-4515-a56d-d9632829c0bd\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ce7a3c6cecb06c3b1c48c12feaf2c5afb8df7b18a2bdf2749ebaac2c4398952a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2abd05b4fe2583b0862c3a746dfe5f395f579562e27fb2c5a5b9e45f15683b12\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d969f6a449492f80217365d4fef8a9e7c302491081301eac57b30ed0ea6ac65\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3cfbb3957994fbace415cea1e8381c0ec9ab6c036fe32768c63e957635694ccc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c96c2d502863efd645a409e26825cee60ad08f0993460e694008488a07f1cb06\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3e4862890012338bc9c73abe374c27de5319127544543a7a7bb6af992c287612\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be
8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3e4862890012338bc9c73abe374c27de5319127544543a7a7bb6af992c287612\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7c5c24e73e0378455aef55023c1da4bc5434e9c613367b6ee81854001c56e070\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c5c24e73e0378455aef55023c1da4bc5434e9c613367b6ee81854001c56e070\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://b8073aa1ab6fa6059ef8e0166bd98c5093e50995057da89705fbe84930521dac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b8073aa1ab6fa6059ef8e0166bd98c5093e50995057da89705fbe84930521dac\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.175053 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.187573 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-l8kf2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d1378c4-1c78-4865-9168-822ad86bae15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bf7d9ec9a13119518dca0133dcfd60a4abde3f28bd276abc57523fdad14ba6a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hdj8x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-l8kf2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.200672 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ffea6197-b1fb-427b-adc5-bcc1c6108235\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2b908a10a292c9c3864e3cdfb3c9fb6e8dce94fbb9b655cc8687fa5fbd018883\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w886f\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-cx2rm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.214753 4703 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.214741 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4c34a2e-afef-43fa-af2f-24fba2afd001\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ca4cdadd36f3e4479c6957f86bd28947800e8c499b7a0990f3e303e7970b2d89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://321c3cb914c8dc421b62be78bc5ad4fac126d2a68c2b17510a0fcf11fc84e0d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://321c3cb914c8dc421b62be78bc5ad4fac126d2a68c2b17510a0fcf11fc84e0d2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:52 crc 
kubenswrapper[4703]: I0130 11:57:52.214799 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.214808 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.214825 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.214835 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:52Z","lastTransitionTime":"2026-01-30T11:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.229032 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1a91d962a278ba035164c9f5ac54528dce5c5daa62006619c264a0345d992113\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.243549 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:26Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1bee01e0efeb8da878887ddb3f937041793f8a09b852a73035af3689009b20ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3356f9a18ad4f40cc636ac4a8baa371c189f68e7d63983eed4ae166fe0ec9f8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.249943 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 16:09:04.373221263 +0000 UTC Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.255997 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9824686a0f7a7cdb5f0b34e4ef60f4914988bbf79c9486744992a5edfe469985\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.276331 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"32108840-3d15-43ae-b3d1-fa5b8eb931c7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb9e804af8c3fbe8f92a5fb6c9177f0b255dff414a8a6b86184a715f4de45af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://08364535e5abb26d6e891f33a546a55e2eac872a80ec1a2c3094a17caed900be\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6567d4674a64771daf98e98551f615b4b358b88c151f0edf76bb131dcedca14d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f6c35591297ffceb48dfc62a21cbd55a03495df18fa756625eda66992afe16f4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0fc92828b0c86a995e34299ff0c9b666f86ec0fcad56e69b02efebedc468eeda\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21fea353449c51bd84a073e431e26cb72d5a906c6b16ce8a3c67ebcf860fffe2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c1994478d141e03a7bd1191bff998a3dcf33d5e234f272d69d2251d60891cef\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5l9xq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-9gsnx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.289366 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1bc71a36-b76e-48cf-a2a5-34b8251b7e15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adb0467b057eb5ef0beeb8216c4acf9a00233d27d04791d48ba9501577c4a97c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f23b988a0de0b2de02119281647abe355ee3d94738659c1d13979799dfaccda\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vlvjk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-g29v6\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z" Jan 30 
11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.304216 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bf2f3ce3-7aeb-4736-a391-655b001a1785\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"g file observer\\\\nW0130 11:56:24.277700 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0130 11:56:24.278291 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0130 11:56:24.279795 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-78838720/tls.crt::/tmp/serving-cert-78838720/tls.key\\\\\\\"\\\\nI0130 11:56:24.626857 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0130 11:56:24.629478 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0130 11:56:24.629495 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0130 11:56:24.629511 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0130 11:56:24.629516 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0130 11:56:24.636209 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0130 11:56:24.636232 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0130 11:56:24.636237 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nI0130 11:56:24.636235 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0130 11:56:24.636242 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0130 11:56:24.636246 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0130 11:56:24.636250 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0130 11:56:24.636254 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0130 11:56:24.639152 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:08Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.318500 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.318618 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.318637 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.318658 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.318673 4703 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:52Z","lastTransitionTime":"2026-01-30T11:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.320524 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c345f2d3-71b6-4200-a1aa-86f730b05b57\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ceb8f3b9ebed9a0f654d231c6bb862f2f37373419ba291bc64b4b618251a0b63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad0497a2d0598489d72b31c0e0116c743a748bdf6c1b382617c8fa0f3a1d8c59\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6e96b1e0e414357f93076273249eac721681b21fcffccc9b70853cce1154c15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:05Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.337165 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.354174 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:24Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.377002 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"06ffa267-20b9-4132-9f87-1218b111ebbc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f
48d4517da5552c5994a8608f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-30T11:57:39Z\\\",\\\"message\\\":\\\"ner\\\\\\\":\\\\\\\"openshift-operator-lifecycle-manager/catalog-operator-metrics\\\\\\\"}\\\\nI0130 11:57:39.362887 6751 services_controller.go:360] Finished syncing service catalog-operator-metrics on namespace openshift-operator-lifecycle-manager for network=default : 2.581886ms\\\\nI0130 11:57:39.363073 6751 address_set.go:302] New(0d39bc5c-d5b9-432c-81be-2275bce5d7aa/default-network-controller:EgressIP:node-ips:v4:default/a712973235162149816) with []\\\\nI0130 11:57:39.363094 6751 address_set.go:302] New(aa6fc2dc-fab0-4812-b9da-809058e4dcf7/default-network-controller:EgressIP:egressip-served-pods:v4:default/a8519615025667110816) with []\\\\nI0130 11:57:39.363110 6751 address_set.go:302] New(bf133528-8652-4c84-85ff-881f0afe9837/default-network-controller:EgressService:egresssvc-served-pods:v4/a13607449821398607916) with []\\\\nI0130 11:57:39.363175 6751 factory.go:1336] Added *v1.Node event handler 7\\\\nI0130 11:57:39.363238 6751 factory.go:1336] Added *v1.EgressIP event handler 8\\\\nI0130 11:57:39.363596 6751 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI0130 11:57:39.363690 6751 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI0130 11:57:39.363760 6751 ovnkube.go:599] Stopped ovnkube\\\\nI0130 11:57:39.363814 6751 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0130 11:57:39.363886 6751 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-30T11:57:37Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-30T11:56:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-30T11:56:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-30T11:56:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dbx97\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-n7wnf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.388854 4703 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qrt92" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-30T11:56:48Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2pmjt\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-30T11:56:48Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qrt92\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-30T11:57:52Z is after 2025-08-24T17:21:41Z"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.420846 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.420886 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.420895 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.420911 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
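
The webhook failures above are a plain x509 validity-window check: the webhook certificate's notAfter (2025-08-24T17:21:41Z) precedes the node's clock (2026-01-30T11:57:52Z), so every pod status patch sent through pod.network-node-identity.openshift.io is rejected. A minimal Python sketch of the same comparison, using only the two timestamps quoted in the log; the notBefore value does not appear in the log and is assumed here:

    from datetime import datetime, timezone

    def validity_error(now, not_before, not_after):
        # The same window test a verifier applies before trusting a certificate.
        if now < not_before:
            return "certificate is not yet valid"
        if now > not_after:
            return "certificate has expired"
        return None

    now = datetime(2026, 1, 30, 11, 57, 52, tzinfo=timezone.utc)        # "current time" from the log
    not_after = datetime(2025, 8, 24, 17, 21, 41, tzinfo=timezone.utc)  # expiry quoted in the log
    not_before = not_after.replace(year=2024)                           # assumed; not present in the log
    print(validity_error(now, not_before, not_after))                   # -> certificate has expired
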
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.420920 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:52Z","lastTransitionTime":"2026-01-30T11:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.524295 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.524338 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.524354 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.524371 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.524381 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:52Z","lastTransitionTime":"2026-01-30T11:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.626940 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.627023 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.627063 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.627097 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.627157 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:52Z","lastTransitionTime":"2026-01-30T11:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.730058 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.730174 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.730191 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.730205 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.730214 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:52Z","lastTransitionTime":"2026-01-30T11:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.833254 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.833297 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.833306 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.833322 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.833331 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:52Z","lastTransitionTime":"2026-01-30T11:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.936160 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.936226 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.936235 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.936249 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:52 crc kubenswrapper[4703]: I0130 11:57:52.936259 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:52Z","lastTransitionTime":"2026-01-30T11:57:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.038784 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.038842 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.038864 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.038883 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.038897 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:53Z","lastTransitionTime":"2026-01-30T11:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.085750 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:53 crc kubenswrapper[4703]: E0130 11:57:53.086020 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.142334 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.142390 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.142407 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.142432 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.142449 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:53Z","lastTransitionTime":"2026-01-30T11:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.245807 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.245881 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.245896 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.245947 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.245965 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:53Z","lastTransitionTime":"2026-01-30T11:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.250410 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 10:00:05.634771823 +0000 UTC Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.348952 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.349049 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.349062 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.349082 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.349094 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:53Z","lastTransitionTime":"2026-01-30T11:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.453244 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.453324 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.453344 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.453369 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.453388 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:53Z","lastTransitionTime":"2026-01-30T11:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.556608 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.556671 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.556689 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.556715 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.556733 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:53Z","lastTransitionTime":"2026-01-30T11:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.660495 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.660583 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.660606 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.660642 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.660671 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:53Z","lastTransitionTime":"2026-01-30T11:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.763620 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.763659 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.763671 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.763689 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.763697 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:53Z","lastTransitionTime":"2026-01-30T11:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.865659 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.865688 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.865697 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.865711 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.865721 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:53Z","lastTransitionTime":"2026-01-30T11:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.968397 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.968442 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.968468 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.968491 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:53 crc kubenswrapper[4703]: I0130 11:57:53.968505 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:53Z","lastTransitionTime":"2026-01-30T11:57:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.071010 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.071079 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.071089 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.071109 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.071141 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:54Z","lastTransitionTime":"2026-01-30T11:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.085705 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.085766 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:54 crc kubenswrapper[4703]: E0130 11:57:54.085888 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.085916 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:54 crc kubenswrapper[4703]: E0130 11:57:54.086090 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:57:54 crc kubenswrapper[4703]: E0130 11:57:54.086237 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.174610 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.174725 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.174750 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.174794 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.174830 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:54Z","lastTransitionTime":"2026-01-30T11:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.251336 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 11:01:25.676839498 +0000 UTC Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.277384 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.277494 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.277537 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.277570 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.277677 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:54Z","lastTransitionTime":"2026-01-30T11:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.294374 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.294424 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.294436 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.294450 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.294460 4703 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-30T11:57:54Z","lastTransitionTime":"2026-01-30T11:57:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.364407 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5"] Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.364858 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.367213 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.367633 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.368072 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.368961 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.390063 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-9gsnx" podStartSLOduration=82.390003492 podStartE2EDuration="1m22.390003492s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:57:54.388430566 +0000 UTC m=+110.166252240" watchObservedRunningTime="2026-01-30 11:57:54.390003492 +0000 UTC m=+110.167825156" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.403600 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-g29v6" podStartSLOduration=82.403579231 podStartE2EDuration="1m22.403579231s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:57:54.402444848 +0000 UTC m=+110.180266522" watchObservedRunningTime="2026-01-30 11:57:54.403579231 +0000 UTC m=+110.181400905" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.446988 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=89.446968856 podStartE2EDuration="1m29.446968856s" podCreationTimestamp="2026-01-30 11:56:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:57:54.427156454 +0000 UTC m=+110.204978108" watchObservedRunningTime="2026-01-30 11:57:54.446968856 +0000 UTC m=+110.224790510" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.447719 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=90.447713008 podStartE2EDuration="1m30.447713008s" podCreationTimestamp="2026-01-30 11:56:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:57:54.447335446 +0000 UTC m=+110.225157130" watchObservedRunningTime="2026-01-30 11:57:54.447713008 +0000 UTC m=+110.225534662" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.491309 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3b836623-e479-4253-b3f2-2dca80c25ce1-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-phns5\" (UID: 
\"3b836623-e479-4253-b3f2-2dca80c25ce1\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.491914 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/3b836623-e479-4253-b3f2-2dca80c25ce1-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-phns5\" (UID: \"3b836623-e479-4253-b3f2-2dca80c25ce1\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.492033 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3b836623-e479-4253-b3f2-2dca80c25ce1-service-ca\") pod \"cluster-version-operator-5c965bbfc6-phns5\" (UID: \"3b836623-e479-4253-b3f2-2dca80c25ce1\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.492083 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/3b836623-e479-4253-b3f2-2dca80c25ce1-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-phns5\" (UID: \"3b836623-e479-4253-b3f2-2dca80c25ce1\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.492162 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3b836623-e479-4253-b3f2-2dca80c25ce1-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-phns5\" (UID: \"3b836623-e479-4253-b3f2-2dca80c25ce1\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.572845 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-72zlj" podStartSLOduration=82.572822952 podStartE2EDuration="1m22.572822952s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:57:54.561572682 +0000 UTC m=+110.339394356" watchObservedRunningTime="2026-01-30 11:57:54.572822952 +0000 UTC m=+110.350644606" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.573548 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-vzhfb" podStartSLOduration=83.573540943 podStartE2EDuration="1m23.573540943s" podCreationTimestamp="2026-01-30 11:56:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:57:54.572011298 +0000 UTC m=+110.349832952" watchObservedRunningTime="2026-01-30 11:57:54.573540943 +0000 UTC m=+110.351362607" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.585299 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=60.585283358 podStartE2EDuration="1m0.585283358s" podCreationTimestamp="2026-01-30 11:56:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:57:54.584855636 +0000 UTC 
m=+110.362677320" watchObservedRunningTime="2026-01-30 11:57:54.585283358 +0000 UTC m=+110.363105012" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.593531 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3b836623-e479-4253-b3f2-2dca80c25ce1-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-phns5\" (UID: \"3b836623-e479-4253-b3f2-2dca80c25ce1\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.593576 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/3b836623-e479-4253-b3f2-2dca80c25ce1-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-phns5\" (UID: \"3b836623-e479-4253-b3f2-2dca80c25ce1\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.593683 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3b836623-e479-4253-b3f2-2dca80c25ce1-service-ca\") pod \"cluster-version-operator-5c965bbfc6-phns5\" (UID: \"3b836623-e479-4253-b3f2-2dca80c25ce1\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.593723 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/3b836623-e479-4253-b3f2-2dca80c25ce1-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-phns5\" (UID: \"3b836623-e479-4253-b3f2-2dca80c25ce1\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.593753 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3b836623-e479-4253-b3f2-2dca80c25ce1-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-phns5\" (UID: \"3b836623-e479-4253-b3f2-2dca80c25ce1\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.594177 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/3b836623-e479-4253-b3f2-2dca80c25ce1-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-phns5\" (UID: \"3b836623-e479-4253-b3f2-2dca80c25ce1\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.594360 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/3b836623-e479-4253-b3f2-2dca80c25ce1-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-phns5\" (UID: \"3b836623-e479-4253-b3f2-2dca80c25ce1\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.594857 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/3b836623-e479-4253-b3f2-2dca80c25ce1-service-ca\") pod \"cluster-version-operator-5c965bbfc6-phns5\" (UID: \"3b836623-e479-4253-b3f2-2dca80c25ce1\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" Jan 30 11:57:54 crc 
kubenswrapper[4703]: I0130 11:57:54.599671 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3b836623-e479-4253-b3f2-2dca80c25ce1-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-phns5\" (UID: \"3b836623-e479-4253-b3f2-2dca80c25ce1\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.611182 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3b836623-e479-4253-b3f2-2dca80c25ce1-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-phns5\" (UID: \"3b836623-e479-4253-b3f2-2dca80c25ce1\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.615036 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=18.615019201 podStartE2EDuration="18.615019201s" podCreationTimestamp="2026-01-30 11:57:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:57:54.613860867 +0000 UTC m=+110.391682521" watchObservedRunningTime="2026-01-30 11:57:54.615019201 +0000 UTC m=+110.392840855" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.640930 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-l8kf2" podStartSLOduration=82.640913202 podStartE2EDuration="1m22.640913202s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:57:54.639646185 +0000 UTC m=+110.417467859" watchObservedRunningTime="2026-01-30 11:57:54.640913202 +0000 UTC m=+110.418734856" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.653400 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podStartSLOduration=82.653384228 podStartE2EDuration="1m22.653384228s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:57:54.653013187 +0000 UTC m=+110.430834841" watchObservedRunningTime="2026-01-30 11:57:54.653384228 +0000 UTC m=+110.431205902" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.666781 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=39.666762491 podStartE2EDuration="39.666762491s" podCreationTimestamp="2026-01-30 11:57:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:57:54.666176064 +0000 UTC m=+110.443997718" watchObservedRunningTime="2026-01-30 11:57:54.666762491 +0000 UTC m=+110.444584135" Jan 30 11:57:54 crc kubenswrapper[4703]: I0130 11:57:54.680109 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" Jan 30 11:57:55 crc kubenswrapper[4703]: I0130 11:57:55.086404 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:55 crc kubenswrapper[4703]: E0130 11:57:55.087354 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:55 crc kubenswrapper[4703]: I0130 11:57:55.231497 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" event={"ID":"3b836623-e479-4253-b3f2-2dca80c25ce1","Type":"ContainerStarted","Data":"0d3ce23e178839d4eb203eade0c79486a19eea3fff5f5e88f4fc62fc54965557"} Jan 30 11:57:55 crc kubenswrapper[4703]: I0130 11:57:55.231554 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" event={"ID":"3b836623-e479-4253-b3f2-2dca80c25ce1","Type":"ContainerStarted","Data":"53e5018bc4255645cf294972f9f41b72bb750354c084497271f5edae97c3b21a"} Jan 30 11:57:55 crc kubenswrapper[4703]: I0130 11:57:55.252525 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 14:35:53.152735595 +0000 UTC Jan 30 11:57:55 crc kubenswrapper[4703]: I0130 11:57:55.252590 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Jan 30 11:57:55 crc kubenswrapper[4703]: I0130 11:57:55.255479 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-phns5" podStartSLOduration=83.25545383 podStartE2EDuration="1m23.25545383s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:57:55.252625807 +0000 UTC m=+111.030447471" watchObservedRunningTime="2026-01-30 11:57:55.25545383 +0000 UTC m=+111.033275484" Jan 30 11:57:55 crc kubenswrapper[4703]: I0130 11:57:55.262672 4703 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 30 11:57:56 crc kubenswrapper[4703]: I0130 11:57:56.085293 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:56 crc kubenswrapper[4703]: I0130 11:57:56.085585 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:56 crc kubenswrapper[4703]: E0130 11:57:56.085680 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:57:56 crc kubenswrapper[4703]: I0130 11:57:56.085622 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:56 crc kubenswrapper[4703]: E0130 11:57:56.085832 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:56 crc kubenswrapper[4703]: E0130 11:57:56.085992 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:57:57 crc kubenswrapper[4703]: I0130 11:57:57.085924 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:57 crc kubenswrapper[4703]: E0130 11:57:57.086053 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:57:58 crc kubenswrapper[4703]: I0130 11:57:58.085550 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:57:58 crc kubenswrapper[4703]: I0130 11:57:58.085575 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:57:58 crc kubenswrapper[4703]: E0130 11:57:58.085747 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:57:58 crc kubenswrapper[4703]: E0130 11:57:58.085747 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:57:58 crc kubenswrapper[4703]: I0130 11:57:58.085766 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:57:58 crc kubenswrapper[4703]: E0130 11:57:58.085963 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:57:59 crc kubenswrapper[4703]: I0130 11:57:59.086537 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:57:59 crc kubenswrapper[4703]: E0130 11:57:59.087677 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:58:00 crc kubenswrapper[4703]: I0130 11:58:00.085248 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:58:00 crc kubenswrapper[4703]: E0130 11:58:00.085350 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:58:00 crc kubenswrapper[4703]: I0130 11:58:00.085499 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:58:00 crc kubenswrapper[4703]: E0130 11:58:00.085545 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:58:00 crc kubenswrapper[4703]: I0130 11:58:00.086009 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:58:00 crc kubenswrapper[4703]: E0130 11:58:00.086342 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:58:01 crc kubenswrapper[4703]: I0130 11:58:01.085304 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:58:01 crc kubenswrapper[4703]: E0130 11:58:01.085743 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:58:02 crc kubenswrapper[4703]: I0130 11:58:02.085666 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:58:02 crc kubenswrapper[4703]: I0130 11:58:02.085779 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:58:02 crc kubenswrapper[4703]: E0130 11:58:02.085821 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:58:02 crc kubenswrapper[4703]: E0130 11:58:02.085971 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:58:02 crc kubenswrapper[4703]: I0130 11:58:02.086668 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:58:02 crc kubenswrapper[4703]: E0130 11:58:02.086881 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:58:03 crc kubenswrapper[4703]: I0130 11:58:03.085859 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:58:03 crc kubenswrapper[4703]: E0130 11:58:03.086201 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:58:04 crc kubenswrapper[4703]: I0130 11:58:04.085619 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:58:04 crc kubenswrapper[4703]: I0130 11:58:04.085642 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:58:04 crc kubenswrapper[4703]: E0130 11:58:04.085747 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:58:04 crc kubenswrapper[4703]: I0130 11:58:04.085814 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:58:04 crc kubenswrapper[4703]: E0130 11:58:04.086051 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:58:04 crc kubenswrapper[4703]: E0130 11:58:04.086228 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:58:04 crc kubenswrapper[4703]: I0130 11:58:04.086897 4703 scope.go:117] "RemoveContainer" containerID="ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f" Jan 30 11:58:04 crc kubenswrapper[4703]: E0130 11:58:04.087066 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" Jan 30 11:58:05 crc kubenswrapper[4703]: E0130 11:58:05.005188 4703 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Jan 30 11:58:05 crc kubenswrapper[4703]: I0130 11:58:05.086682 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:58:05 crc kubenswrapper[4703]: E0130 11:58:05.086866 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:58:05 crc kubenswrapper[4703]: E0130 11:58:05.295672 4703 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 30 11:58:06 crc kubenswrapper[4703]: I0130 11:58:06.086021 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:58:06 crc kubenswrapper[4703]: I0130 11:58:06.086107 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:58:06 crc kubenswrapper[4703]: I0130 11:58:06.086233 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:58:06 crc kubenswrapper[4703]: E0130 11:58:06.086390 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:58:06 crc kubenswrapper[4703]: E0130 11:58:06.086524 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:58:06 crc kubenswrapper[4703]: E0130 11:58:06.086625 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:58:07 crc kubenswrapper[4703]: I0130 11:58:07.086193 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:58:07 crc kubenswrapper[4703]: E0130 11:58:07.086406 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:58:08 crc kubenswrapper[4703]: I0130 11:58:08.085996 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:58:08 crc kubenswrapper[4703]: I0130 11:58:08.086037 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:58:08 crc kubenswrapper[4703]: E0130 11:58:08.086180 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:58:08 crc kubenswrapper[4703]: I0130 11:58:08.086316 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:58:08 crc kubenswrapper[4703]: E0130 11:58:08.086450 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:58:08 crc kubenswrapper[4703]: E0130 11:58:08.086645 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:58:09 crc kubenswrapper[4703]: I0130 11:58:09.086230 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:58:09 crc kubenswrapper[4703]: E0130 11:58:09.086437 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:58:10 crc kubenswrapper[4703]: I0130 11:58:10.085656 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:58:10 crc kubenswrapper[4703]: I0130 11:58:10.085689 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:58:10 crc kubenswrapper[4703]: I0130 11:58:10.085656 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:58:10 crc kubenswrapper[4703]: E0130 11:58:10.085834 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:58:10 crc kubenswrapper[4703]: E0130 11:58:10.085982 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:58:10 crc kubenswrapper[4703]: E0130 11:58:10.086013 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:58:10 crc kubenswrapper[4703]: E0130 11:58:10.297229 4703 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 30 11:58:11 crc kubenswrapper[4703]: I0130 11:58:11.085538 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:58:11 crc kubenswrapper[4703]: E0130 11:58:11.085780 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:58:12 crc kubenswrapper[4703]: I0130 11:58:12.085452 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:58:12 crc kubenswrapper[4703]: I0130 11:58:12.085521 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:58:12 crc kubenswrapper[4703]: I0130 11:58:12.085574 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:58:12 crc kubenswrapper[4703]: E0130 11:58:12.085597 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd" Jan 30 11:58:12 crc kubenswrapper[4703]: E0130 11:58:12.085705 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 30 11:58:12 crc kubenswrapper[4703]: E0130 11:58:12.085821 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:58:12 crc kubenswrapper[4703]: I0130 11:58:12.302272 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-72zlj_874a5df5-f6bd-4111-aefa-f43e43e1fcc0/kube-multus/1.log" Jan 30 11:58:12 crc kubenswrapper[4703]: I0130 11:58:12.302914 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-72zlj_874a5df5-f6bd-4111-aefa-f43e43e1fcc0/kube-multus/0.log" Jan 30 11:58:12 crc kubenswrapper[4703]: I0130 11:58:12.302967 4703 generic.go:334] "Generic (PLEG): container finished" podID="874a5df5-f6bd-4111-aefa-f43e43e1fcc0" containerID="af38bdb6fc351ea256fbb5a368b87ad70202821a6a348e230d540c60694cc014" exitCode=1 Jan 30 11:58:12 crc kubenswrapper[4703]: I0130 11:58:12.303013 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-72zlj" event={"ID":"874a5df5-f6bd-4111-aefa-f43e43e1fcc0","Type":"ContainerDied","Data":"af38bdb6fc351ea256fbb5a368b87ad70202821a6a348e230d540c60694cc014"} Jan 30 11:58:12 crc kubenswrapper[4703]: I0130 11:58:12.303065 4703 scope.go:117] "RemoveContainer" containerID="f78922d20c547b772c068e9509afb6008a8410a292bbef64d7c5c804c9a10890" Jan 30 11:58:12 crc kubenswrapper[4703]: I0130 11:58:12.303629 4703 scope.go:117] "RemoveContainer" containerID="af38bdb6fc351ea256fbb5a368b87ad70202821a6a348e230d540c60694cc014" Jan 30 11:58:12 crc kubenswrapper[4703]: E0130 11:58:12.303989 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-72zlj_openshift-multus(874a5df5-f6bd-4111-aefa-f43e43e1fcc0)\"" pod="openshift-multus/multus-72zlj" podUID="874a5df5-f6bd-4111-aefa-f43e43e1fcc0" Jan 30 11:58:13 crc kubenswrapper[4703]: I0130 11:58:13.086167 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:58:13 crc kubenswrapper[4703]: E0130 11:58:13.086569 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:58:13 crc kubenswrapper[4703]: I0130 11:58:13.308778 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-72zlj_874a5df5-f6bd-4111-aefa-f43e43e1fcc0/kube-multus/1.log" Jan 30 11:58:14 crc kubenswrapper[4703]: I0130 11:58:14.085648 4703 util.go:30] "No sandbox for pod can be found. 
Jan 30 11:58:14 crc kubenswrapper[4703]: I0130 11:58:14.085653 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:58:14 crc kubenswrapper[4703]: I0130 11:58:14.085758 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:58:14 crc kubenswrapper[4703]: E0130 11:58:14.086175 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:58:14 crc kubenswrapper[4703]: E0130 11:58:14.086045 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:58:14 crc kubenswrapper[4703]: E0130 11:58:14.086308 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:58:15 crc kubenswrapper[4703]: I0130 11:58:15.085927 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:58:15 crc kubenswrapper[4703]: E0130 11:58:15.088521 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 30 11:58:15 crc kubenswrapper[4703]: I0130 11:58:15.089093 4703 scope.go:117] "RemoveContainer" containerID="ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"
Jan 30 11:58:15 crc kubenswrapper[4703]: E0130 11:58:15.089431 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-n7wnf_openshift-ovn-kubernetes(06ffa267-20b9-4132-9f87-1218b111ebbc)\"" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc"
Jan 30 11:58:15 crc kubenswrapper[4703]: E0130 11:58:15.298932 4703 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Jan 30 11:58:16 crc kubenswrapper[4703]: I0130 11:58:16.086429 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:58:16 crc kubenswrapper[4703]: I0130 11:58:16.086496 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:58:16 crc kubenswrapper[4703]: E0130 11:58:16.086627 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:58:16 crc kubenswrapper[4703]: I0130 11:58:16.086760 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:58:16 crc kubenswrapper[4703]: E0130 11:58:16.087462 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:58:16 crc kubenswrapper[4703]: E0130 11:58:16.087599 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:58:17 crc kubenswrapper[4703]: I0130 11:58:17.086075 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:58:17 crc kubenswrapper[4703]: E0130 11:58:17.086326 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 30 11:58:18 crc kubenswrapper[4703]: I0130 11:58:18.085838 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:58:18 crc kubenswrapper[4703]: I0130 11:58:18.085892 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:58:18 crc kubenswrapper[4703]: I0130 11:58:18.085933 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:58:18 crc kubenswrapper[4703]: E0130 11:58:18.086065 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:58:18 crc kubenswrapper[4703]: E0130 11:58:18.086269 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:58:18 crc kubenswrapper[4703]: E0130 11:58:18.086479 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:58:19 crc kubenswrapper[4703]: I0130 11:58:19.086287 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:58:19 crc kubenswrapper[4703]: E0130 11:58:19.086529 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 30 11:58:20 crc kubenswrapper[4703]: I0130 11:58:20.085593 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:58:20 crc kubenswrapper[4703]: I0130 11:58:20.085671 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:58:20 crc kubenswrapper[4703]: E0130 11:58:20.085769 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:58:20 crc kubenswrapper[4703]: I0130 11:58:20.085593 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:58:20 crc kubenswrapper[4703]: E0130 11:58:20.086070 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:58:20 crc kubenswrapper[4703]: E0130 11:58:20.086207 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:58:20 crc kubenswrapper[4703]: E0130 11:58:20.300600 4703 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Jan 30 11:58:21 crc kubenswrapper[4703]: I0130 11:58:21.086365 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:58:21 crc kubenswrapper[4703]: E0130 11:58:21.086652 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 30 11:58:22 crc kubenswrapper[4703]: I0130 11:58:22.085939 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:58:22 crc kubenswrapper[4703]: I0130 11:58:22.086042 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:58:22 crc kubenswrapper[4703]: E0130 11:58:22.086213 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:58:22 crc kubenswrapper[4703]: E0130 11:58:22.086355 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:58:22 crc kubenswrapper[4703]: I0130 11:58:22.086497 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:58:22 crc kubenswrapper[4703]: E0130 11:58:22.086573 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:58:23 crc kubenswrapper[4703]: I0130 11:58:23.086025 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:58:23 crc kubenswrapper[4703]: E0130 11:58:23.086283 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 30 11:58:24 crc kubenswrapper[4703]: I0130 11:58:24.085983 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:58:24 crc kubenswrapper[4703]: I0130 11:58:24.086068 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:58:24 crc kubenswrapper[4703]: I0130 11:58:24.086098 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:58:24 crc kubenswrapper[4703]: E0130 11:58:24.086286 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:58:24 crc kubenswrapper[4703]: E0130 11:58:24.086420 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:58:24 crc kubenswrapper[4703]: E0130 11:58:24.086486 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:58:25 crc kubenswrapper[4703]: I0130 11:58:25.085455 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
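The two CrashLoopBackOff entries in this stretch show different delays for the same mechanism: kube-multus is at "back-off 10s" while ovnkube-controller has reached "back-off 40s". A sketch of that schedule, assuming the upstream kubelet defaults of a 10s initial delay doubling per crash up to a 5m cap (values assumed, not read from this cluster's config):

```go
// backoff.go - kubelet-style container restart back-off: the delay starts
// at 10s and doubles with each crash until it hits the 5m ceiling.
package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		initial  = 10 * time.Second // assumed kubelet default
		maxDelay = 5 * time.Minute  // assumed kubelet cap
	)
	d := initial
	for crash := 1; crash <= 7; crash++ {
		fmt.Printf("crash %d -> back-off %v\n", crash, d) // 10s, 20s, 40s, ...
		d *= 2
		if d > maxDelay {
			d = maxDelay
		}
	}
}
```

Under these assumptions, "back-off 40s" corresponds to a container that has already crashed three times.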
Jan 30 11:58:25 crc kubenswrapper[4703]: E0130 11:58:25.087193 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 30 11:58:25 crc kubenswrapper[4703]: E0130 11:58:25.301908 4703 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Jan 30 11:58:26 crc kubenswrapper[4703]: I0130 11:58:26.086231 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:58:26 crc kubenswrapper[4703]: I0130 11:58:26.086321 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:58:26 crc kubenswrapper[4703]: I0130 11:58:26.086346 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:58:26 crc kubenswrapper[4703]: E0130 11:58:26.086477 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:58:26 crc kubenswrapper[4703]: E0130 11:58:26.086571 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:58:26 crc kubenswrapper[4703]: E0130 11:58:26.086685 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:58:27 crc kubenswrapper[4703]: I0130 11:58:27.085892 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:58:27 crc kubenswrapper[4703]: E0130 11:58:27.086171 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 30 11:58:28 crc kubenswrapper[4703]: I0130 11:58:28.086031 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:58:28 crc kubenswrapper[4703]: E0130 11:58:28.086202 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:58:28 crc kubenswrapper[4703]: I0130 11:58:28.086031 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:58:28 crc kubenswrapper[4703]: I0130 11:58:28.086031 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:58:28 crc kubenswrapper[4703]: E0130 11:58:28.086313 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:58:28 crc kubenswrapper[4703]: E0130 11:58:28.086693 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:58:28 crc kubenswrapper[4703]: I0130 11:58:28.086962 4703 scope.go:117] "RemoveContainer" containerID="ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"
Jan 30 11:58:28 crc kubenswrapper[4703]: I0130 11:58:28.086995 4703 scope.go:117] "RemoveContainer" containerID="af38bdb6fc351ea256fbb5a368b87ad70202821a6a348e230d540c60694cc014"
Jan 30 11:58:28 crc kubenswrapper[4703]: I0130 11:58:28.370036 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovnkube-controller/3.log"
Jan 30 11:58:28 crc kubenswrapper[4703]: I0130 11:58:28.374588 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerStarted","Data":"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21"}
Jan 30 11:58:28 crc kubenswrapper[4703]: I0130 11:58:28.375051 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf"
Jan 30 11:58:28 crc kubenswrapper[4703]: I0130 11:58:28.383359 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-72zlj_874a5df5-f6bd-4111-aefa-f43e43e1fcc0/kube-multus/1.log"
Jan 30 11:58:28 crc kubenswrapper[4703]: I0130 11:58:28.383499 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-72zlj" event={"ID":"874a5df5-f6bd-4111-aefa-f43e43e1fcc0","Type":"ContainerStarted","Data":"7fbcc2e52627ea8ee7eb2c47a9d32d96b4388fac90edfd7cf994bb9dcfa14b6b"}
Jan 30 11:58:28 crc kubenswrapper[4703]: I0130 11:58:28.419973 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" podStartSLOduration=116.41993166 podStartE2EDuration="1m56.41993166s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:28.418197861 +0000 UTC m=+144.196019535" watchObservedRunningTime="2026-01-30 11:58:28.41993166 +0000 UTC m=+144.197753354"
Jan 30 11:58:29 crc kubenswrapper[4703]: I0130 11:58:29.086401 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:58:29 crc kubenswrapper[4703]: E0130 11:58:29.086522 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 30 11:58:29 crc kubenswrapper[4703]: I0130 11:58:29.601007 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-qrt92"]
Jan 30 11:58:29 crc kubenswrapper[4703]: I0130 11:58:29.601244 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:58:29 crc kubenswrapper[4703]: E0130 11:58:29.601377 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:58:30 crc kubenswrapper[4703]: I0130 11:58:30.086300 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:58:30 crc kubenswrapper[4703]: I0130 11:58:30.086343 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:58:30 crc kubenswrapper[4703]: E0130 11:58:30.086455 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:58:30 crc kubenswrapper[4703]: E0130 11:58:30.086591 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:58:30 crc kubenswrapper[4703]: E0130 11:58:30.303274 4703 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Jan 30 11:58:31 crc kubenswrapper[4703]: I0130 11:58:31.085881 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:58:31 crc kubenswrapper[4703]: E0130 11:58:31.086091 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 30 11:58:32 crc kubenswrapper[4703]: I0130 11:58:32.086365 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:58:32 crc kubenswrapper[4703]: I0130 11:58:32.086481 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:58:32 crc kubenswrapper[4703]: E0130 11:58:32.086602 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:58:32 crc kubenswrapper[4703]: I0130 11:58:32.086700 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:58:32 crc kubenswrapper[4703]: E0130 11:58:32.086841 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:58:32 crc kubenswrapper[4703]: E0130 11:58:32.086991 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 30 11:58:32 crc kubenswrapper[4703]: I0130 11:58:32.990050 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:58:32 crc kubenswrapper[4703]: I0130 11:58:32.990231 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:58:32 crc kubenswrapper[4703]: I0130 11:58:32.990254 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:58:32 crc kubenswrapper[4703]: E0130 11:58:32.990361 4703 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 30 11:58:32 crc kubenswrapper[4703]: E0130 11:58:32.990409 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-30 12:00:34.990396733 +0000 UTC m=+270.768218377 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 30 11:58:32 crc kubenswrapper[4703]: E0130 11:58:32.990483 4703 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 30 11:58:32 crc kubenswrapper[4703]: E0130 11:58:32.990504 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 12:00:34.990477455 +0000 UTC m=+270.768299149 (durationBeforeRetry 2m2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:32 crc kubenswrapper[4703]: E0130 11:58:32.990643 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-30 12:00:34.990599608 +0000 UTC m=+270.768421342 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 30 11:58:33 crc kubenswrapper[4703]: I0130 11:58:33.085856 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 30 11:58:33 crc kubenswrapper[4703]: E0130 11:58:33.086012 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 30 11:58:33 crc kubenswrapper[4703]: I0130 11:58:33.091541 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:58:33 crc kubenswrapper[4703]: I0130 11:58:33.091585 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:58:33 crc kubenswrapper[4703]: E0130 11:58:33.091721 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 30 11:58:33 crc kubenswrapper[4703]: E0130 11:58:33.091721 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 30 11:58:33 crc kubenswrapper[4703]: E0130 11:58:33.091762 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 30 11:58:33 crc kubenswrapper[4703]: E0130 11:58:33.091795 4703 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 11:58:33 crc kubenswrapper[4703]: E0130 11:58:33.091736 4703 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 30 11:58:33 crc kubenswrapper[4703]: E0130 11:58:33.091849 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-30 12:00:35.091829525 +0000 UTC m=+270.869651289 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 11:58:33 crc kubenswrapper[4703]: E0130 11:58:33.092043 4703 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 11:58:33 crc kubenswrapper[4703]: E0130 11:58:33.092097 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-30 12:00:35.092079942 +0000 UTC m=+270.869901596 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 30 11:58:34 crc kubenswrapper[4703]: I0130 11:58:34.085714 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:58:34 crc kubenswrapper[4703]: I0130 11:58:34.085714 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 30 11:58:34 crc kubenswrapper[4703]: E0130 11:58:34.086427 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 30 11:58:34 crc kubenswrapper[4703]: E0130 11:58:34.086093 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qrt92" podUID="ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd"
Jan 30 11:58:34 crc kubenswrapper[4703]: I0130 11:58:34.085757 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 30 11:58:34 crc kubenswrapper[4703]: E0130 11:58:34.086508 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
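The nestedpendingoperations entries above all defer their next retry by exactly 2m2s, which reads as the cap of the volume-operation back-off. A sketch of that exponential schedule, assuming the upstream defaults of a 500ms initial delay doubling to a 2m2s ceiling (constants assumed, not read from this node):

```go
// volbackoff.go - exponential retry back-off of the kind behind the
// "No retries permitted until ... (durationBeforeRetry 2m2s)" entries.
package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		initial  = 500 * time.Millisecond       // assumed initial delay
		maxDelay = 2*time.Minute + 2*time.Second // the 2m2s cap seen in the log
	)
	d := initial
	for attempt := 1; d < maxDelay; attempt++ {
		fmt.Printf("attempt %d -> wait %v\n", attempt, d)
		d *= 2
	}
	fmt.Printf("further attempts -> wait %v (capped)\n", maxDelay)
}
```

Once the cap is reached, every subsequent failure of the same volume operation is retried on the fixed 2m2s cadence, which is why all three operations above land within a millisecond of 12:00:35.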
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 30 11:58:35 crc kubenswrapper[4703]: I0130 11:58:35.085347 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:58:35 crc kubenswrapper[4703]: E0130 11:58:35.087376 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 30 11:58:36 crc kubenswrapper[4703]: I0130 11:58:36.085943 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 11:58:36 crc kubenswrapper[4703]: I0130 11:58:36.085980 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 11:58:36 crc kubenswrapper[4703]: I0130 11:58:36.086374 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92" Jan 30 11:58:36 crc kubenswrapper[4703]: I0130 11:58:36.088873 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 30 11:58:36 crc kubenswrapper[4703]: I0130 11:58:36.089159 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 30 11:58:36 crc kubenswrapper[4703]: I0130 11:58:36.089222 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 30 11:58:36 crc kubenswrapper[4703]: I0130 11:58:36.090366 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 30 11:58:37 crc kubenswrapper[4703]: I0130 11:58:37.085794 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 11:58:37 crc kubenswrapper[4703]: I0130 11:58:37.088331 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 30 11:58:37 crc kubenswrapper[4703]: I0130 11:58:37.089199 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 30 11:58:42 crc kubenswrapper[4703]: I0130 11:58:42.823866 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 11:58:42 crc kubenswrapper[4703]: I0130 11:58:42.823965 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.257116 4703 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.298662 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-x5w7b"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.299282 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-x5w7b" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.300159 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-d4tv6"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.300777 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.301246 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-dv4qg"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.301843 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.304526 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.305264 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.305567 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zwbps"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.306656 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-xfvl5"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.306863 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-zwbps" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.307847 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.308621 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.309096 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bth9p"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.309156 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.309452 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bth9p" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.314912 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-gkllf"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.315914 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.318855 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.318888 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.318914 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.319332 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.319468 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.319580 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.320965 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qwk4"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.321532 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qwk4" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.321532 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.322309 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.322424 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.322477 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.322493 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.322625 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.322720 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.322917 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.322952 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.323164 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.323208 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.321937 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.323629 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.324346 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.324532 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.324597 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.324670 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.321993 4703 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.321993 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.324978 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.325103 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.325177 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.325331 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.325422 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.325431 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.325639 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.325776 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.325909 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.326013 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.326095 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.326219 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.326256 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.326372 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.326388 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.326517 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.326587 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.327482 4703 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.328227 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4c5n9"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.328510 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.328774 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-q7gv4"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.329714 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-q7gv4" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.330296 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4c5n9" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.336002 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.347825 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.348184 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.348504 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.348539 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.348652 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.348812 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.348841 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.348966 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.349150 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.350993 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.351375 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.351482 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.351547 4703 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-apiserver"/"config" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.351585 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.367371 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.367691 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.367410 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.368229 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.368318 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.368528 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.368771 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.368877 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.368916 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.368993 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.369065 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.368834 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.369284 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.369288 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.369647 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.369703 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.369748 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.369881 4703 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.369899 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.369994 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.370115 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.370260 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.370285 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.370301 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.370340 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.370259 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.370416 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.370575 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.370654 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.370971 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.371535 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.371810 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.372168 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-mxjx6"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.372757 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.375704 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.376242 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2r4b9"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.377059 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.379182 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.380006 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.382338 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-vt7hb"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.383255 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.383612 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-sn2fc"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.384034 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-sn2fc" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.384921 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.402309 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.411559 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.412292 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pvx9n"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.413050 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pvx9n" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.413219 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-b8qs4"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.413814 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-b8qs4" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.414793 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-92h6w"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.415068 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-92h6w" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.425569 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.426047 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.426528 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.433302 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.433386 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.434114 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.435241 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.437712 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.438376 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.439338 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.441341 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.442370 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.442573 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c74cde9-f16e-4ee1-a881-868f6d3b9865-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-bth9p\" (UID: \"5c74cde9-f16e-4ee1-a881-868f6d3b9865\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bth9p" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.442629 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/64f5798c-a6f3-4a7e-9b16-f3039aab5a23-serving-cert\") pod \"console-operator-58897d9998-zwbps\" (UID: \"64f5798c-a6f3-4a7e-9b16-f3039aab5a23\") " 
pod="openshift-console-operator/console-operator-58897d9998-zwbps" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.442671 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ht7rt\" (UniqueName: \"kubernetes.io/projected/3e848933-041a-4f18-89af-8f369b7eebcc-kube-api-access-ht7rt\") pod \"route-controller-manager-6576b87f9c-x28r8\" (UID: \"3e848933-041a-4f18-89af-8f369b7eebcc\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.442695 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64f5798c-a6f3-4a7e-9b16-f3039aab5a23-config\") pod \"console-operator-58897d9998-zwbps\" (UID: \"64f5798c-a6f3-4a7e-9b16-f3039aab5a23\") " pod="openshift-console-operator/console-operator-58897d9998-zwbps" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.442721 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dsl9t\" (UniqueName: \"kubernetes.io/projected/8ff6e057-d092-41f8-908a-7f718f8e7813-kube-api-access-dsl9t\") pod \"machine-api-operator-5694c8668f-x5w7b\" (UID: \"8ff6e057-d092-41f8-908a-7f718f8e7813\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5w7b" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.442745 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqvh8\" (UniqueName: \"kubernetes.io/projected/ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3-kube-api-access-nqvh8\") pod \"authentication-operator-69f744f599-xfvl5\" (UID: \"ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.442765 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3e848933-041a-4f18-89af-8f369b7eebcc-client-ca\") pod \"route-controller-manager-6576b87f9c-x28r8\" (UID: \"3e848933-041a-4f18-89af-8f369b7eebcc\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.442791 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/66c74144-f023-4e32-a378-d61b9cb574a5-audit-policies\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.442953 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-client-ca\") pod \"controller-manager-879f6c89f-dv4qg\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.442988 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sw576\" (UniqueName: \"kubernetes.io/projected/923a63f2-7c6f-4c55-a80f-ecf12184d88f-kube-api-access-sw576\") pod \"cluster-samples-operator-665b6dd947-8qwk4\" (UID: 
\"923a63f2-7c6f-4c55-a80f-ecf12184d88f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qwk4" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.443054 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3-serving-cert\") pod \"authentication-operator-69f744f599-xfvl5\" (UID: \"ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.443089 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zndrz\" (UniqueName: \"kubernetes.io/projected/66c74144-f023-4e32-a378-d61b9cb574a5-kube-api-access-zndrz\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.443113 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3-service-ca-bundle\") pod \"authentication-operator-69f744f599-xfvl5\" (UID: \"ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.443160 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2k8q\" (UniqueName: \"kubernetes.io/projected/1441806f-c27a-4a93-82e3-123caba174c5-kube-api-access-n2k8q\") pod \"downloads-7954f5f757-q7gv4\" (UID: \"1441806f-c27a-4a93-82e3-123caba174c5\") " pod="openshift-console/downloads-7954f5f757-q7gv4" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.448912 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e848933-041a-4f18-89af-8f369b7eebcc-config\") pod \"route-controller-manager-6576b87f9c-x28r8\" (UID: \"3e848933-041a-4f18-89af-8f369b7eebcc\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.449717 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.448953 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/f598eb59-e841-4a92-bd81-926bb698c44e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-6969t\" (UID: \"f598eb59-e841-4a92-bd81-926bb698c44e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.451326 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/66c74144-f023-4e32-a378-d61b9cb574a5-audit-dir\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.451429 4703 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/66c74144-f023-4e32-a378-d61b9cb574a5-etcd-client\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.451458 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ff6e057-d092-41f8-908a-7f718f8e7813-config\") pod \"machine-api-operator-5694c8668f-x5w7b\" (UID: \"8ff6e057-d092-41f8-908a-7f718f8e7813\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5w7b" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.451482 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-serving-cert\") pod \"controller-manager-879f6c89f-dv4qg\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.451509 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-dv4qg\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.451539 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f598eb59-e841-4a92-bd81-926bb698c44e-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-6969t\" (UID: \"f598eb59-e841-4a92-bd81-926bb698c44e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.451558 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hg69j\" (UniqueName: \"kubernetes.io/projected/f598eb59-e841-4a92-bd81-926bb698c44e-kube-api-access-hg69j\") pod \"cluster-image-registry-operator-dc59b4c8b-6969t\" (UID: \"f598eb59-e841-4a92-bd81-926bb698c44e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462433 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c74cde9-f16e-4ee1-a881-868f6d3b9865-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-bth9p\" (UID: \"5c74cde9-f16e-4ee1-a881-868f6d3b9865\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bth9p" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462497 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/988a730f-bd1f-45b8-97a9-c14bea7d749e-serving-cert\") pod \"openshift-config-operator-7777fb866f-g5vsj\" (UID: \"988a730f-bd1f-45b8-97a9-c14bea7d749e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462531 4703 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4sk2\" (UniqueName: \"kubernetes.io/projected/68f47bdd-b01b-467d-914f-ef08276b4cc5-kube-api-access-n4sk2\") pod \"machine-approver-56656f9798-8dk4t\" (UID: \"68f47bdd-b01b-467d-914f-ef08276b4cc5\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462573 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-config\") pod \"controller-manager-879f6c89f-dv4qg\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462600 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmgkr\" (UniqueName: \"kubernetes.io/projected/5c74cde9-f16e-4ee1-a881-868f6d3b9865-kube-api-access-kmgkr\") pod \"openshift-controller-manager-operator-756b6f6bc6-bth9p\" (UID: \"5c74cde9-f16e-4ee1-a881-868f6d3b9865\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bth9p" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462628 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/8ff6e057-d092-41f8-908a-7f718f8e7813-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-x5w7b\" (UID: \"8ff6e057-d092-41f8-908a-7f718f8e7813\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5w7b" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462648 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25mx7\" (UniqueName: \"kubernetes.io/projected/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-kube-api-access-25mx7\") pod \"controller-manager-879f6c89f-dv4qg\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462667 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/64f5798c-a6f3-4a7e-9b16-f3039aab5a23-trusted-ca\") pod \"console-operator-58897d9998-zwbps\" (UID: \"64f5798c-a6f3-4a7e-9b16-f3039aab5a23\") " pod="openshift-console-operator/console-operator-58897d9998-zwbps" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462686 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfknw\" (UniqueName: \"kubernetes.io/projected/64f5798c-a6f3-4a7e-9b16-f3039aab5a23-kube-api-access-xfknw\") pod \"console-operator-58897d9998-zwbps\" (UID: \"64f5798c-a6f3-4a7e-9b16-f3039aab5a23\") " pod="openshift-console-operator/console-operator-58897d9998-zwbps" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462706 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/923a63f2-7c6f-4c55-a80f-ecf12184d88f-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-8qwk4\" (UID: \"923a63f2-7c6f-4c55-a80f-ecf12184d88f\") " 
pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qwk4" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462729 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3-config\") pod \"authentication-operator-69f744f599-xfvl5\" (UID: \"ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462749 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/68f47bdd-b01b-467d-914f-ef08276b4cc5-machine-approver-tls\") pod \"machine-approver-56656f9798-8dk4t\" (UID: \"68f47bdd-b01b-467d-914f-ef08276b4cc5\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462770 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/8ff6e057-d092-41f8-908a-7f718f8e7813-images\") pod \"machine-api-operator-5694c8668f-x5w7b\" (UID: \"8ff6e057-d092-41f8-908a-7f718f8e7813\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5w7b" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462793 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/988a730f-bd1f-45b8-97a9-c14bea7d749e-available-featuregates\") pod \"openshift-config-operator-7777fb866f-g5vsj\" (UID: \"988a730f-bd1f-45b8-97a9-c14bea7d749e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462822 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/68f47bdd-b01b-467d-914f-ef08276b4cc5-auth-proxy-config\") pod \"machine-approver-56656f9798-8dk4t\" (UID: \"68f47bdd-b01b-467d-914f-ef08276b4cc5\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462849 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f598eb59-e841-4a92-bd81-926bb698c44e-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-6969t\" (UID: \"f598eb59-e841-4a92-bd81-926bb698c44e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462869 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/66c74144-f023-4e32-a378-d61b9cb574a5-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462888 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/68f47bdd-b01b-467d-914f-ef08276b4cc5-config\") pod \"machine-approver-56656f9798-8dk4t\" (UID: \"68f47bdd-b01b-467d-914f-ef08276b4cc5\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462911 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/66c74144-f023-4e32-a378-d61b9cb574a5-encryption-config\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462931 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-xfvl5\" (UID: \"ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462953 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g699n\" (UniqueName: \"kubernetes.io/projected/988a730f-bd1f-45b8-97a9-c14bea7d749e-kube-api-access-g699n\") pod \"openshift-config-operator-7777fb866f-g5vsj\" (UID: \"988a730f-bd1f-45b8-97a9-c14bea7d749e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.462999 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/66c74144-f023-4e32-a378-d61b9cb574a5-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.463026 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e848933-041a-4f18-89af-8f369b7eebcc-serving-cert\") pod \"route-controller-manager-6576b87f9c-x28r8\" (UID: \"3e848933-041a-4f18-89af-8f369b7eebcc\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.463058 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66c74144-f023-4e32-a378-d61b9cb574a5-serving-cert\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.464417 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.465146 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-qzs78"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.467276 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.468585 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.469093 4703 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-qzs78"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.469344 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-xmjbx"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.469776 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.474608 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-v286d"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.474744 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-xmjbx"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.475332 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-jn8hp"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.475418 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-v286d"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.476454 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-jn8hp"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.476196 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxv85"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.477706 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-x56jp"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.478563 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxv85"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.478883 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4xxv7"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.479495 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.479867 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.480105 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x56jp"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.480386 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4xxv7"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.480782 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.480890 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-9pqdt"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.480783 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.481443 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-9pqdt"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.482889 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wd7g9"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.483441 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.484243 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.484396 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.484805 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.488845 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.489487 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.489674 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-v7hlb"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.489987 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-v7hlb"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.490589 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.491142 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.492715 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-x5w7b"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.494507 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-dv4qg"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.495856 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zwbps"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.497536 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-d4tv6"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.502115 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.502464 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bth9p"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.503928 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.506113 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-wsjdp"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.507505 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-wsjdp"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.528615 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.530671 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4c5n9"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.532439 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-q7gv4"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.533047 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.540006 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-b8qs4"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.541837 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.544136 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2r4b9"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.547019 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.551896 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pvx9n"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.553395 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-mxjx6"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.554924 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-xfvl5"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.556405 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qwk4"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.560423 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-vt7hb"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.561830 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.561958 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.566838 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-sn2fc"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.568838 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4xxv7"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.570252 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-v286d"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.571881 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-92h6w"]
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.573196 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zndrz\" (UniqueName: \"kubernetes.io/projected/66c74144-f023-4e32-a378-d61b9cb574a5-kube-api-access-zndrz\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.573270 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3-service-ca-bundle\") pod \"authentication-operator-69f744f599-xfvl5\" (UID: \"ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.573320 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2k8q\" (UniqueName: \"kubernetes.io/projected/1441806f-c27a-4a93-82e3-123caba174c5-kube-api-access-n2k8q\") pod \"downloads-7954f5f757-q7gv4\" (UID: \"1441806f-c27a-4a93-82e3-123caba174c5\") " pod="openshift-console/downloads-7954f5f757-q7gv4"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.573347 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e848933-041a-4f18-89af-8f369b7eebcc-config\") pod \"route-controller-manager-6576b87f9c-x28r8\" (UID: \"3e848933-041a-4f18-89af-8f369b7eebcc\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.573391 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/f598eb59-e841-4a92-bd81-926bb698c44e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-6969t\" (UID: \"f598eb59-e841-4a92-bd81-926bb698c44e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.573518 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/66c74144-f023-4e32-a378-d61b9cb574a5-audit-dir\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.573435 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/66c74144-f023-4e32-a378-d61b9cb574a5-audit-dir\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.574694 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/66c74144-f023-4e32-a378-d61b9cb574a5-etcd-client\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.574738 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ff6e057-d092-41f8-908a-7f718f8e7813-config\") pod \"machine-api-operator-5694c8668f-x5w7b\" (UID: \"8ff6e057-d092-41f8-908a-7f718f8e7813\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5w7b"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.574760 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-serving-cert\") pod \"controller-manager-879f6c89f-dv4qg\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.574795 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-dv4qg\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.574850 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c74cde9-f16e-4ee1-a881-868f6d3b9865-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-bth9p\" (UID: \"5c74cde9-f16e-4ee1-a881-868f6d3b9865\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bth9p"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.574928 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/988a730f-bd1f-45b8-97a9-c14bea7d749e-serving-cert\") pod \"openshift-config-operator-7777fb866f-g5vsj\" (UID: \"988a730f-bd1f-45b8-97a9-c14bea7d749e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.574984 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f598eb59-e841-4a92-bd81-926bb698c44e-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-6969t\" (UID: \"f598eb59-e841-4a92-bd81-926bb698c44e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.575046 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hg69j\" (UniqueName: \"kubernetes.io/projected/f598eb59-e841-4a92-bd81-926bb698c44e-kube-api-access-hg69j\") pod \"cluster-image-registry-operator-dc59b4c8b-6969t\" (UID: \"f598eb59-e841-4a92-bd81-926bb698c44e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.575071 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4sk2\" (UniqueName: \"kubernetes.io/projected/68f47bdd-b01b-467d-914f-ef08276b4cc5-kube-api-access-n4sk2\") pod \"machine-approver-56656f9798-8dk4t\" (UID: \"68f47bdd-b01b-467d-914f-ef08276b4cc5\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.575355 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-config\") pod \"controller-manager-879f6c89f-dv4qg\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.575575 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3-service-ca-bundle\") pod \"authentication-operator-69f744f599-xfvl5\" (UID: \"ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.575929 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e848933-041a-4f18-89af-8f369b7eebcc-config\") pod \"route-controller-manager-6576b87f9c-x28r8\" (UID: \"3e848933-041a-4f18-89af-8f369b7eebcc\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.576170 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmgkr\" (UniqueName: \"kubernetes.io/projected/5c74cde9-f16e-4ee1-a881-868f6d3b9865-kube-api-access-kmgkr\") pod \"openshift-controller-manager-operator-756b6f6bc6-bth9p\" (UID: \"5c74cde9-f16e-4ee1-a881-868f6d3b9865\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bth9p"
\"kubernetes.io/projected/64f5798c-a6f3-4a7e-9b16-f3039aab5a23-kube-api-access-xfknw\") pod \"console-operator-58897d9998-zwbps\" (UID: \"64f5798c-a6f3-4a7e-9b16-f3039aab5a23\") " pod="openshift-console-operator/console-operator-58897d9998-zwbps" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.576260 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-dv4qg\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.576269 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/923a63f2-7c6f-4c55-a80f-ecf12184d88f-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-8qwk4\" (UID: \"923a63f2-7c6f-4c55-a80f-ecf12184d88f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qwk4" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.576292 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/8ff6e057-d092-41f8-908a-7f718f8e7813-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-x5w7b\" (UID: \"8ff6e057-d092-41f8-908a-7f718f8e7813\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5w7b" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.576310 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25mx7\" (UniqueName: \"kubernetes.io/projected/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-kube-api-access-25mx7\") pod \"controller-manager-879f6c89f-dv4qg\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.576328 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/64f5798c-a6f3-4a7e-9b16-f3039aab5a23-trusted-ca\") pod \"console-operator-58897d9998-zwbps\" (UID: \"64f5798c-a6f3-4a7e-9b16-f3039aab5a23\") " pod="openshift-console-operator/console-operator-58897d9998-zwbps" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.576347 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3-config\") pod \"authentication-operator-69f744f599-xfvl5\" (UID: \"ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.576391 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/68f47bdd-b01b-467d-914f-ef08276b4cc5-machine-approver-tls\") pod \"machine-approver-56656f9798-8dk4t\" (UID: \"68f47bdd-b01b-467d-914f-ef08276b4cc5\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.576457 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/8ff6e057-d092-41f8-908a-7f718f8e7813-images\") pod \"machine-api-operator-5694c8668f-x5w7b\" (UID: 
\"8ff6e057-d092-41f8-908a-7f718f8e7813\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5w7b" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.576483 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/988a730f-bd1f-45b8-97a9-c14bea7d749e-available-featuregates\") pod \"openshift-config-operator-7777fb866f-g5vsj\" (UID: \"988a730f-bd1f-45b8-97a9-c14bea7d749e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.576509 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/68f47bdd-b01b-467d-914f-ef08276b4cc5-auth-proxy-config\") pod \"machine-approver-56656f9798-8dk4t\" (UID: \"68f47bdd-b01b-467d-914f-ef08276b4cc5\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.576535 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/68f47bdd-b01b-467d-914f-ef08276b4cc5-config\") pod \"machine-approver-56656f9798-8dk4t\" (UID: \"68f47bdd-b01b-467d-914f-ef08276b4cc5\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.576558 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f598eb59-e841-4a92-bd81-926bb698c44e-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-6969t\" (UID: \"f598eb59-e841-4a92-bd81-926bb698c44e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.576580 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/66c74144-f023-4e32-a378-d61b9cb574a5-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.576598 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/66c74144-f023-4e32-a378-d61b9cb574a5-encryption-config\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.576620 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g699n\" (UniqueName: \"kubernetes.io/projected/988a730f-bd1f-45b8-97a9-c14bea7d749e-kube-api-access-g699n\") pod \"openshift-config-operator-7777fb866f-g5vsj\" (UID: \"988a730f-bd1f-45b8-97a9-c14bea7d749e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.577991 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-xfvl5\" (UID: \"ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5" Jan 30 11:58:45 crc 
kubenswrapper[4703]: I0130 11:58:45.577739 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-gkllf"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.578007 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/64f5798c-a6f3-4a7e-9b16-f3039aab5a23-trusted-ca\") pod \"console-operator-58897d9998-zwbps\" (UID: \"64f5798c-a6f3-4a7e-9b16-f3039aab5a23\") " pod="openshift-console-operator/console-operator-58897d9998-zwbps" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.578054 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-config\") pod \"controller-manager-879f6c89f-dv4qg\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.578394 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/66c74144-f023-4e32-a378-d61b9cb574a5-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.577249 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ff6e057-d092-41f8-908a-7f718f8e7813-config\") pod \"machine-api-operator-5694c8668f-x5w7b\" (UID: \"8ff6e057-d092-41f8-908a-7f718f8e7813\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5w7b" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.578837 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e848933-041a-4f18-89af-8f369b7eebcc-serving-cert\") pod \"route-controller-manager-6576b87f9c-x28r8\" (UID: \"3e848933-041a-4f18-89af-8f369b7eebcc\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.578906 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/68f47bdd-b01b-467d-914f-ef08276b4cc5-config\") pod \"machine-approver-56656f9798-8dk4t\" (UID: \"68f47bdd-b01b-467d-914f-ef08276b4cc5\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.579048 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/66c74144-f023-4e32-a378-d61b9cb574a5-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.579114 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f598eb59-e841-4a92-bd81-926bb698c44e-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-6969t\" (UID: \"f598eb59-e841-4a92-bd81-926bb698c44e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.579151 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/66c74144-f023-4e32-a378-d61b9cb574a5-serving-cert\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.579212 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c74cde9-f16e-4ee1-a881-868f6d3b9865-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-bth9p\" (UID: \"5c74cde9-f16e-4ee1-a881-868f6d3b9865\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bth9p" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.579239 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/64f5798c-a6f3-4a7e-9b16-f3039aab5a23-serving-cert\") pod \"console-operator-58897d9998-zwbps\" (UID: \"64f5798c-a6f3-4a7e-9b16-f3039aab5a23\") " pod="openshift-console-operator/console-operator-58897d9998-zwbps" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.579271 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ht7rt\" (UniqueName: \"kubernetes.io/projected/3e848933-041a-4f18-89af-8f369b7eebcc-kube-api-access-ht7rt\") pod \"route-controller-manager-6576b87f9c-x28r8\" (UID: \"3e848933-041a-4f18-89af-8f369b7eebcc\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.579288 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64f5798c-a6f3-4a7e-9b16-f3039aab5a23-config\") pod \"console-operator-58897d9998-zwbps\" (UID: \"64f5798c-a6f3-4a7e-9b16-f3039aab5a23\") " pod="openshift-console-operator/console-operator-58897d9998-zwbps" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.579592 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dsl9t\" (UniqueName: \"kubernetes.io/projected/8ff6e057-d092-41f8-908a-7f718f8e7813-kube-api-access-dsl9t\") pod \"machine-api-operator-5694c8668f-x5w7b\" (UID: \"8ff6e057-d092-41f8-908a-7f718f8e7813\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5w7b" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.579897 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c74cde9-f16e-4ee1-a881-868f6d3b9865-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-bth9p\" (UID: \"5c74cde9-f16e-4ee1-a881-868f6d3b9865\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bth9p" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.580250 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/66c74144-f023-4e32-a378-d61b9cb574a5-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.580400 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-serving-cert\") pod \"controller-manager-879f6c89f-dv4qg\" (UID: 
\"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.582515 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5c74cde9-f16e-4ee1-a881-868f6d3b9865-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-bth9p\" (UID: \"5c74cde9-f16e-4ee1-a881-868f6d3b9865\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bth9p" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.582556 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqvh8\" (UniqueName: \"kubernetes.io/projected/ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3-kube-api-access-nqvh8\") pod \"authentication-operator-69f744f599-xfvl5\" (UID: \"ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.582562 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3-config\") pod \"authentication-operator-69f744f599-xfvl5\" (UID: \"ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.582587 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3e848933-041a-4f18-89af-8f369b7eebcc-client-ca\") pod \"route-controller-manager-6576b87f9c-x28r8\" (UID: \"3e848933-041a-4f18-89af-8f369b7eebcc\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.582661 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/66c74144-f023-4e32-a378-d61b9cb574a5-audit-policies\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.582674 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/66c74144-f023-4e32-a378-d61b9cb574a5-encryption-config\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.582686 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-client-ca\") pod \"controller-manager-879f6c89f-dv4qg\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.582753 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sw576\" (UniqueName: \"kubernetes.io/projected/923a63f2-7c6f-4c55-a80f-ecf12184d88f-kube-api-access-sw576\") pod \"cluster-samples-operator-665b6dd947-8qwk4\" (UID: \"923a63f2-7c6f-4c55-a80f-ecf12184d88f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qwk4" Jan 30 11:58:45 crc 
kubenswrapper[4703]: I0130 11:58:45.582794 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3-serving-cert\") pod \"authentication-operator-69f744f599-xfvl5\" (UID: \"ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.583155 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/8ff6e057-d092-41f8-908a-7f718f8e7813-images\") pod \"machine-api-operator-5694c8668f-x5w7b\" (UID: \"8ff6e057-d092-41f8-908a-7f718f8e7813\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5w7b" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.583315 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/8ff6e057-d092-41f8-908a-7f718f8e7813-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-x5w7b\" (UID: \"8ff6e057-d092-41f8-908a-7f718f8e7813\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5w7b" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.583399 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/66c74144-f023-4e32-a378-d61b9cb574a5-audit-policies\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.583523 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3e848933-041a-4f18-89af-8f369b7eebcc-client-ca\") pod \"route-controller-manager-6576b87f9c-x28r8\" (UID: \"3e848933-041a-4f18-89af-8f369b7eebcc\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.583876 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.583917 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64f5798c-a6f3-4a7e-9b16-f3039aab5a23-config\") pod \"console-operator-58897d9998-zwbps\" (UID: \"64f5798c-a6f3-4a7e-9b16-f3039aab5a23\") " pod="openshift-console-operator/console-operator-58897d9998-zwbps" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.578871 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/68f47bdd-b01b-467d-914f-ef08276b4cc5-auth-proxy-config\") pod \"machine-approver-56656f9798-8dk4t\" (UID: \"68f47bdd-b01b-467d-914f-ef08276b4cc5\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.584047 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxv85"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.584704 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/66c74144-f023-4e32-a378-d61b9cb574a5-etcd-client\") pod \"apiserver-7bbb656c7d-cz789\" (UID: 
\"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.584611 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-client-ca\") pod \"controller-manager-879f6c89f-dv4qg\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.584896 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/f598eb59-e841-4a92-bd81-926bb698c44e-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-6969t\" (UID: \"f598eb59-e841-4a92-bd81-926bb698c44e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.584899 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66c74144-f023-4e32-a378-d61b9cb574a5-serving-cert\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.585313 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/68f47bdd-b01b-467d-914f-ef08276b4cc5-machine-approver-tls\") pod \"machine-approver-56656f9798-8dk4t\" (UID: \"68f47bdd-b01b-467d-914f-ef08276b4cc5\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.585428 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/988a730f-bd1f-45b8-97a9-c14bea7d749e-available-featuregates\") pod \"openshift-config-operator-7777fb866f-g5vsj\" (UID: \"988a730f-bd1f-45b8-97a9-c14bea7d749e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.585776 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/988a730f-bd1f-45b8-97a9-c14bea7d749e-serving-cert\") pod \"openshift-config-operator-7777fb866f-g5vsj\" (UID: \"988a730f-bd1f-45b8-97a9-c14bea7d749e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.585983 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/923a63f2-7c6f-4c55-a80f-ecf12184d88f-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-8qwk4\" (UID: \"923a63f2-7c6f-4c55-a80f-ecf12184d88f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qwk4" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.586239 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/64f5798c-a6f3-4a7e-9b16-f3039aab5a23-serving-cert\") pod \"console-operator-58897d9998-zwbps\" (UID: \"64f5798c-a6f3-4a7e-9b16-f3039aab5a23\") " pod="openshift-console-operator/console-operator-58897d9998-zwbps" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.586303 4703 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.586382 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3-serving-cert\") pod \"authentication-operator-69f744f599-xfvl5\" (UID: \"ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.587248 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e848933-041a-4f18-89af-8f369b7eebcc-serving-cert\") pod \"route-controller-manager-6576b87f9c-x28r8\" (UID: \"3e848933-041a-4f18-89af-8f369b7eebcc\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.587481 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.587830 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-xfvl5\" (UID: \"ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.588555 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-x56jp"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.589561 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-xmjbx"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.590601 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-jn8hp"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.591603 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-5zq9g"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.592286 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-5zq9g" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.592672 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wd7g9"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.593642 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-v7hlb"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.594676 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-wsjdp"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.595666 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-zlb62"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.596408 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-zlb62" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.596717 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-9pqdt"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.597694 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-5zq9g"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.598719 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.599713 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-zlb62"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.601788 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.601944 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.603074 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.604028 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-n5kz2"] Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.604635 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-n5kz2" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.621753 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.645237 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.661170 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.680997 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.701483 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.722101 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.741091 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.761887 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.781939 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 30 11:58:45 crc 
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.801502 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.822209 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.841904 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.861754 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.882057 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.902606 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.921736 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.961970 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Jan 30 11:58:45 crc kubenswrapper[4703]: I0130 11:58:45.982481 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.001777 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.028788 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.041345 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.062509 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.081818 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.102484 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.123766 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.142059 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.162919 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.182353 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.202179 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.221504 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.242557 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.262220 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.281801 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.301973 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.321920 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.342094 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.361567 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.382530 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.402211 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.422849 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.442494 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.462005 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.481911 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.500052 4703 request.go:700] Waited for 1.019372026s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operator-lifecycle-manager/configmaps?fieldSelector=metadata.name%3Dopenshift-service-ca.crt&limit=500&resourceVersion=0
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.502399 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.522725 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.543500 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.561925 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.581715 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.602462 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.622830 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.642326 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.672037 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.681908 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.702352 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.721752 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.741548 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.763274 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.782814 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.802500 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.822048 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.842030 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.862099 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.902757 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.922464 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.942918 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.963272 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 30 11:58:46 crc kubenswrapper[4703]: I0130 11:58:46.983028 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.002475 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.023163 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.042166 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.063863 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.082581 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.122942 4703 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.142492 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.162408 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.203009 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-trusted-ca\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.203070 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-installation-pull-secrets\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.203113 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.203184 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-registry-certificates\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.203352 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.203437 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/28191393-c4b1-4d80-9994-ca31868c9fb4-image-import-ca\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.203560 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zzvln\" (UniqueName: \"kubernetes.io/projected/aa4aaaa5-396e-4e62-92a3-74b835af58a7-kube-api-access-zzvln\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.203650 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-bound-sa-token\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.203756 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa4aaaa5-396e-4e62-92a3-74b835af58a7-console-serving-cert\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.203957 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.204014 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" 
(UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.204090 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-audit-policies\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.204174 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-oauth-serving-cert\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.204280 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a6520b48-04e3-4b95-8c18-0c51e2e41566-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-4c5n9\" (UID: \"a6520b48-04e3-4b95-8c18-0c51e2e41566\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4c5n9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.204332 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-registry-tls\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.204357 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.204379 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/28191393-c4b1-4d80-9994-ca31868c9fb4-trusted-ca-bundle\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.204428 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.204452 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: 
\"kubernetes.io/configmap/28191393-c4b1-4d80-9994-ca31868c9fb4-etcd-serving-ca\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.204490 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.204509 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.204544 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-console-config\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.204566 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-trusted-ca-bundle\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.204594 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.204616 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/28191393-c4b1-4d80-9994-ca31868c9fb4-node-pullsecrets\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.204640 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: E0130 11:58:47.204668 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-30 11:58:47.704644996 +0000 UTC m=+163.482466690 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.204714 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhth4\" (UniqueName: \"kubernetes.io/projected/56158b04-1a02-453d-b48a-a107343a3955-kube-api-access-xhth4\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.205079 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2k8q\" (UniqueName: \"kubernetes.io/projected/1441806f-c27a-4a93-82e3-123caba174c5-kube-api-access-n2k8q\") pod \"downloads-7954f5f757-q7gv4\" (UID: \"1441806f-c27a-4a93-82e3-123caba174c5\") " pod="openshift-console/downloads-7954f5f757-q7gv4" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.205217 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/28191393-c4b1-4d80-9994-ca31868c9fb4-audit-dir\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.205256 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28191393-c4b1-4d80-9994-ca31868c9fb4-config\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.205306 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.205344 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/28191393-c4b1-4d80-9994-ca31868c9fb4-audit\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.205375 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/28191393-c4b1-4d80-9994-ca31868c9fb4-etcd-client\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.205405 
4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-ca-trust-extracted\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.205435 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9nq8\" (UniqueName: \"kubernetes.io/projected/a6520b48-04e3-4b95-8c18-0c51e2e41566-kube-api-access-q9nq8\") pod \"openshift-apiserver-operator-796bbdcf4f-4c5n9\" (UID: \"a6520b48-04e3-4b95-8c18-0c51e2e41566\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4c5n9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.205511 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4spt\" (UniqueName: \"kubernetes.io/projected/28191393-c4b1-4d80-9994-ca31868c9fb4-kube-api-access-k4spt\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.205555 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-service-ca\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.205595 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/28191393-c4b1-4d80-9994-ca31868c9fb4-encryption-config\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.205670 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.205706 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmfk2\" (UniqueName: \"kubernetes.io/projected/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-kube-api-access-hmfk2\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.205739 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/aa4aaaa5-396e-4e62-92a3-74b835af58a7-console-oauth-config\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.205790 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/56158b04-1a02-453d-b48a-a107343a3955-audit-dir\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.205819 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/28191393-c4b1-4d80-9994-ca31868c9fb4-serving-cert\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.205849 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6520b48-04e3-4b95-8c18-0c51e2e41566-config\") pod \"openshift-apiserver-operator-796bbdcf4f-4c5n9\" (UID: \"a6520b48-04e3-4b95-8c18-0c51e2e41566\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4c5n9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.235921 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zndrz\" (UniqueName: \"kubernetes.io/projected/66c74144-f023-4e32-a378-d61b9cb574a5-kube-api-access-zndrz\") pod \"apiserver-7bbb656c7d-cz789\" (UID: \"66c74144-f023-4e32-a378-d61b9cb574a5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.246084 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/f598eb59-e841-4a92-bd81-926bb698c44e-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-6969t\" (UID: \"f598eb59-e841-4a92-bd81-926bb698c44e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.264227 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4sk2\" (UniqueName: \"kubernetes.io/projected/68f47bdd-b01b-467d-914f-ef08276b4cc5-kube-api-access-n4sk2\") pod \"machine-approver-56656f9798-8dk4t\" (UID: \"68f47bdd-b01b-467d-914f-ef08276b4cc5\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.286579 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hg69j\" (UniqueName: \"kubernetes.io/projected/f598eb59-e841-4a92-bd81-926bb698c44e-kube-api-access-hg69j\") pod \"cluster-image-registry-operator-dc59b4c8b-6969t\" (UID: \"f598eb59-e841-4a92-bd81-926bb698c44e\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.289927 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.306523 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:47 crc kubenswrapper[4703]: E0130 11:58:47.306826 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:47.806788817 +0000 UTC m=+163.584610501 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.307430 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0c06264a-b3b5-4784-9ed9-05fb2c937105-metrics-tls\") pod \"ingress-operator-5b745b69d9-8tl5d\" (UID: \"0c06264a-b3b5-4784-9ed9-05fb2c937105\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.307664 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/28191393-c4b1-4d80-9994-ca31868c9fb4-etcd-client\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.307829 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9nq8\" (UniqueName: \"kubernetes.io/projected/a6520b48-04e3-4b95-8c18-0c51e2e41566-kube-api-access-q9nq8\") pod \"openshift-apiserver-operator-796bbdcf4f-4c5n9\" (UID: \"a6520b48-04e3-4b95-8c18-0c51e2e41566\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4c5n9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.308065 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92rr9\" (UniqueName: \"kubernetes.io/projected/2eaf3e49-2394-495c-926e-5504ff81ccc5-kube-api-access-92rr9\") pod \"collect-profiles-29496225-zh786\" (UID: \"2eaf3e49-2394-495c-926e-5504ff81ccc5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.308346 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4spt\" (UniqueName: \"kubernetes.io/projected/28191393-c4b1-4d80-9994-ca31868c9fb4-kube-api-access-k4spt\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.308541 4703 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/de735d2c-8817-415e-b190-df96bf922407-plugins-dir\") pod \"csi-hostpathplugin-wsjdp\" (UID: \"de735d2c-8817-415e-b190-df96bf922407\") " pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.308719 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45c9n\" (UniqueName: \"kubernetes.io/projected/e0ce7c98-966a-4d09-b401-f6cb42c1b08e-kube-api-access-45c9n\") pod \"dns-operator-744455d44c-jn8hp\" (UID: \"e0ce7c98-966a-4d09-b401-f6cb42c1b08e\") " pod="openshift-dns-operator/dns-operator-744455d44c-jn8hp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.308894 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jkxg\" (UniqueName: \"kubernetes.io/projected/8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba-kube-api-access-6jkxg\") pod \"packageserver-d55dfcdfc-9lxbz\" (UID: \"8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.309107 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/28191393-c4b1-4d80-9994-ca31868c9fb4-encryption-config\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.309502 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4hj9\" (UniqueName: \"kubernetes.io/projected/de735d2c-8817-415e-b190-df96bf922407-kube-api-access-w4hj9\") pod \"csi-hostpathplugin-wsjdp\" (UID: \"de735d2c-8817-415e-b190-df96bf922407\") " pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.309760 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmgkr\" (UniqueName: \"kubernetes.io/projected/5c74cde9-f16e-4ee1-a881-868f6d3b9865-kube-api-access-kmgkr\") pod \"openshift-controller-manager-operator-756b6f6bc6-bth9p\" (UID: \"5c74cde9-f16e-4ee1-a881-868f6d3b9865\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bth9p" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.310332 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmfk2\" (UniqueName: \"kubernetes.io/projected/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-kube-api-access-hmfk2\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.310743 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/2b49de6a-eb13-487d-b779-6cad65bbbc33-proxy-tls\") pod \"machine-config-operator-74547568cd-w7r6v\" (UID: \"2b49de6a-eb13-487d-b779-6cad65bbbc33\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.310913 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba-tmpfs\") pod \"packageserver-d55dfcdfc-9lxbz\" (UID: \"8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.311452 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/26730f5b-96f6-40f6-ab66-0500a306f988-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wd7g9\" (UID: \"26730f5b-96f6-40f6-ab66-0500a306f988\") " pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.311663 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/28191393-c4b1-4d80-9994-ca31868c9fb4-etcd-client\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.312238 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/897e6904-d07b-468a-9eff-504d8ce80db1-config\") pod \"etcd-operator-b45778765-vt7hb\" (UID: \"897e6904-d07b-468a-9eff-504d8ce80db1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.312503 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/56158b04-1a02-453d-b48a-a107343a3955-audit-dir\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.312820 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/897e6904-d07b-468a-9eff-504d8ce80db1-etcd-client\") pod \"etcd-operator-b45778765-vt7hb\" (UID: \"897e6904-d07b-468a-9eff-504d8ce80db1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.312745 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/56158b04-1a02-453d-b48a-a107343a3955-audit-dir\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.313407 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/115e8738-ea93-49f0-a85b-9c59933c940c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-pvx9n\" (UID: \"115e8738-ea93-49f0-a85b-9c59933c940c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pvx9n" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.313653 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47544af6-cdac-444e-9c16-37ded5e11e28-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-sxv85\" (UID: \"47544af6-cdac-444e-9c16-37ded5e11e28\") " 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxv85" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.314037 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t429q\" (UniqueName: \"kubernetes.io/projected/194f460d-e3a1-409f-b6ca-7338f62025c0-kube-api-access-t429q\") pod \"catalog-operator-68c6474976-l4tvp\" (UID: \"194f460d-e3a1-409f-b6ca-7338f62025c0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.314436 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba-apiservice-cert\") pod \"packageserver-d55dfcdfc-9lxbz\" (UID: \"8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.314674 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-registry-certificates\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.316279 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-trusted-ca\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.315653 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/28191393-c4b1-4d80-9994-ca31868c9fb4-encryption-config\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.316341 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-installation-pull-secrets\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.316397 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcjr8\" (UniqueName: \"kubernetes.io/projected/26730f5b-96f6-40f6-ab66-0500a306f988-kube-api-access-rcjr8\") pod \"marketplace-operator-79b997595-wd7g9\" (UID: \"26730f5b-96f6-40f6-ab66-0500a306f988\") " pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.316453 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b887r\" (UniqueName: \"kubernetes.io/projected/40e464c0-96c5-4c69-8537-ef29b93319ab-kube-api-access-b887r\") pod \"control-plane-machine-set-operator-78cbb6b69f-92h6w\" (UID: \"40e464c0-96c5-4c69-8537-ef29b93319ab\") " 
pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-92h6w" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.316515 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2rhr\" (UniqueName: \"kubernetes.io/projected/8a7284fa-8f0a-461c-9753-4636c4f6e3f0-kube-api-access-l2rhr\") pod \"service-ca-9c57cc56f-9pqdt\" (UID: \"8a7284fa-8f0a-461c-9753-4636c4f6e3f0\") " pod="openshift-service-ca/service-ca-9c57cc56f-9pqdt" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.316572 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/28191393-c4b1-4d80-9994-ca31868c9fb4-image-import-ca\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.316626 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/194f460d-e3a1-409f-b6ca-7338f62025c0-profile-collector-cert\") pod \"catalog-operator-68c6474976-l4tvp\" (UID: \"194f460d-e3a1-409f-b6ca-7338f62025c0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.316811 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2eaf3e49-2394-495c-926e-5504ff81ccc5-secret-volume\") pod \"collect-profiles-29496225-zh786\" (UID: \"2eaf3e49-2394-495c-926e-5504ff81ccc5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.316955 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3164cd87-2399-4b8f-8314-f663f2fc0b52-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-4xxv7\" (UID: \"3164cd87-2399-4b8f-8314-f663f2fc0b52\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4xxv7" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.317155 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-trusted-ca\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.317196 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb-config\") pod \"kube-apiserver-operator-766d6c64bb-v286d\" (UID: \"66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-v286d" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.317256 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/47544af6-cdac-444e-9c16-37ded5e11e28-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-sxv85\" (UID: \"47544af6-cdac-444e-9c16-37ded5e11e28\") " 
pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxv85" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.317343 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-bound-sa-token\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.317397 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa4aaaa5-396e-4e62-92a3-74b835af58a7-console-serving-cert\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.317456 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/47544af6-cdac-444e-9c16-37ded5e11e28-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-sxv85\" (UID: \"47544af6-cdac-444e-9c16-37ded5e11e28\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxv85" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.317509 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/db60e754-3b34-4d6d-8d4c-46384bc04d60-stats-auth\") pod \"router-default-5444994796-qzs78\" (UID: \"db60e754-3b34-4d6d-8d4c-46384bc04d60\") " pod="openshift-ingress/router-default-5444994796-qzs78" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.317563 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0c06264a-b3b5-4784-9ed9-05fb2c937105-bound-sa-token\") pod \"ingress-operator-5b745b69d9-8tl5d\" (UID: \"0c06264a-b3b5-4784-9ed9-05fb2c937105\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.317613 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b4ff5527-1a84-4f96-a063-b74f0e220eb9-config-volume\") pod \"dns-default-zlb62\" (UID: \"b4ff5527-1a84-4f96-a063-b74f0e220eb9\") " pod="openshift-dns/dns-default-zlb62" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.317702 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/de735d2c-8817-415e-b190-df96bf922407-registration-dir\") pod \"csi-hostpathplugin-wsjdp\" (UID: \"de735d2c-8817-415e-b190-df96bf922407\") " pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.317768 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.317818 4703 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.317882 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-audit-policies\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.317932 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-oauth-serving-cert\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.318014 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/8a7284fa-8f0a-461c-9753-4636c4f6e3f0-signing-key\") pod \"service-ca-9c57cc56f-9pqdt\" (UID: \"8a7284fa-8f0a-461c-9753-4636c4f6e3f0\") " pod="openshift-service-ca/service-ca-9c57cc56f-9pqdt" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.318063 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/26730f5b-96f6-40f6-ab66-0500a306f988-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wd7g9\" (UID: \"26730f5b-96f6-40f6-ab66-0500a306f988\") " pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" Jan 30 11:58:47 crc kubenswrapper[4703]: E0130 11:58:47.318113 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:47.818094498 +0000 UTC m=+163.595916282 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.318314 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsh89\" (UniqueName: \"kubernetes.io/projected/b4ff5527-1a84-4f96-a063-b74f0e220eb9-kube-api-access-jsh89\") pod \"dns-default-zlb62\" (UID: \"b4ff5527-1a84-4f96-a063-b74f0e220eb9\") " pod="openshift-dns/dns-default-zlb62" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.319791 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-oauth-serving-cert\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.320175 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbbtl\" (UniqueName: \"kubernetes.io/projected/c271501b-03fc-4c15-b1d7-e705cbcf16eb-kube-api-access-cbbtl\") pod \"multus-admission-controller-857f4d67dd-xmjbx\" (UID: \"c271501b-03fc-4c15-b1d7-e705cbcf16eb\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-xmjbx" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.320223 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb27e0bb-49da-4e6c-a7f7-e8addc64b009-config\") pod \"service-ca-operator-777779d784-v7hlb\" (UID: \"bb27e0bb-49da-4e6c-a7f7-e8addc64b009\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-v7hlb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.320797 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.320875 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-trusted-ca-bundle\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.320941 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvzdx\" (UniqueName: \"kubernetes.io/projected/2ccbee61-4f6b-4070-ae06-071f1062ad2f-kube-api-access-wvzdx\") pod \"olm-operator-6b444d44fb-22qwf\" (UID: \"2ccbee61-4f6b-4070-ae06-071f1062ad2f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321017 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-installation-pull-secrets\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321061 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/146be6e7-a4cb-40c5-bda3-1e9b3c757118-certs\") pod \"machine-config-server-n5kz2\" (UID: \"146be6e7-a4cb-40c5-bda3-1e9b3c757118\") " pod="openshift-machine-config-operator/machine-config-server-n5kz2" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321177 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/28191393-c4b1-4d80-9994-ca31868c9fb4-node-pullsecrets\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321219 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/61fedc4c-7586-4cdc-b38e-be8a6f3762a1-cert\") pod \"ingress-canary-5zq9g\" (UID: \"61fedc4c-7586-4cdc-b38e-be8a6f3762a1\") " pod="openshift-ingress-canary/ingress-canary-5zq9g" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321258 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321301 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhth4\" (UniqueName: \"kubernetes.io/projected/56158b04-1a02-453d-b48a-a107343a3955-kube-api-access-xhth4\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321340 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/40e464c0-96c5-4c69-8537-ef29b93319ab-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-92h6w\" (UID: \"40e464c0-96c5-4c69-8537-ef29b93319ab\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-92h6w" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321531 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/28191393-c4b1-4d80-9994-ca31868c9fb4-audit-dir\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321586 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28191393-c4b1-4d80-9994-ca31868c9fb4-config\") pod \"apiserver-76f77b778f-gkllf\" (UID: 
\"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321623 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1ac1debf-d20c-4a3b-abec-64c6e317cb69-proxy-tls\") pod \"machine-config-controller-84d6567774-x56jp\" (UID: \"1ac1debf-d20c-4a3b-abec-64c6e317cb69\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x56jp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321656 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97rtk\" (UniqueName: \"kubernetes.io/projected/3164cd87-2399-4b8f-8314-f663f2fc0b52-kube-api-access-97rtk\") pod \"package-server-manager-789f6589d5-4xxv7\" (UID: \"3164cd87-2399-4b8f-8314-f663f2fc0b52\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4xxv7" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321693 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/28191393-c4b1-4d80-9994-ca31868c9fb4-audit\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321724 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/146be6e7-a4cb-40c5-bda3-1e9b3c757118-node-bootstrap-token\") pod \"machine-config-server-n5kz2\" (UID: \"146be6e7-a4cb-40c5-bda3-1e9b3c757118\") " pod="openshift-machine-config-operator/machine-config-server-n5kz2" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321759 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-ca-trust-extracted\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321787 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e0ce7c98-966a-4d09-b401-f6cb42c1b08e-metrics-tls\") pod \"dns-operator-744455d44c-jn8hp\" (UID: \"e0ce7c98-966a-4d09-b401-f6cb42c1b08e\") " pod="openshift-dns-operator/dns-operator-744455d44c-jn8hp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321847 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-service-ca\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321869 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbspk\" (UniqueName: \"kubernetes.io/projected/897e6904-d07b-468a-9eff-504d8ce80db1-kube-api-access-sbspk\") pod \"etcd-operator-b45778765-vt7hb\" (UID: \"897e6904-d07b-468a-9eff-504d8ce80db1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321959 4703 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33681525-5de5-4f9e-a774-809bd0603c5a-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-sn2fc\" (UID: \"33681525-5de5-4f9e-a774-809bd0603c5a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-sn2fc" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321983 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c271501b-03fc-4c15-b1d7-e705cbcf16eb-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-xmjbx\" (UID: \"c271501b-03fc-4c15-b1d7-e705cbcf16eb\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-xmjbx" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.321984 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/28191393-c4b1-4d80-9994-ca31868c9fb4-node-pullsecrets\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.322052 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-v286d\" (UID: \"66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-v286d" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.322403 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/28191393-c4b1-4d80-9994-ca31868c9fb4-image-import-ca\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.323046 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/28191393-c4b1-4d80-9994-ca31868c9fb4-audit\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.323226 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.324056 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.324106 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: 
\"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-service-ca\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.324320 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/28191393-c4b1-4d80-9994-ca31868c9fb4-audit-dir\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.324563 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28191393-c4b1-4d80-9994-ca31868c9fb4-config\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.324710 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db60e754-3b34-4d6d-8d4c-46384bc04d60-service-ca-bundle\") pod \"router-default-5444994796-qzs78\" (UID: \"db60e754-3b34-4d6d-8d4c-46384bc04d60\") " pod="openshift-ingress/router-default-5444994796-qzs78" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.324762 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.324777 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-ca-trust-extracted\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.324794 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrdcs\" (UniqueName: \"kubernetes.io/projected/33681525-5de5-4f9e-a774-809bd0603c5a-kube-api-access-vrdcs\") pod \"kube-storage-version-migrator-operator-b67b599dd-sn2fc\" (UID: \"33681525-5de5-4f9e-a774-809bd0603c5a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-sn2fc" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.324828 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/8a7284fa-8f0a-461c-9753-4636c4f6e3f0-signing-cabundle\") pod \"service-ca-9c57cc56f-9pqdt\" (UID: \"8a7284fa-8f0a-461c-9753-4636c4f6e3f0\") " pod="openshift-service-ca/service-ca-9c57cc56f-9pqdt" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.324951 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/aa4aaaa5-396e-4e62-92a3-74b835af58a7-console-oauth-config\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" 
Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.324986 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/115e8738-ea93-49f0-a85b-9c59933c940c-config\") pod \"kube-controller-manager-operator-78b949d7b-pvx9n\" (UID: \"115e8738-ea93-49f0-a85b-9c59933c940c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pvx9n" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.325344 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/28191393-c4b1-4d80-9994-ca31868c9fb4-serving-cert\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.325421 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d677h\" (UniqueName: \"kubernetes.io/projected/146be6e7-a4cb-40c5-bda3-1e9b3c757118-kube-api-access-d677h\") pod \"machine-config-server-n5kz2\" (UID: \"146be6e7-a4cb-40c5-bda3-1e9b3c757118\") " pod="openshift-machine-config-operator/machine-config-server-n5kz2" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.325448 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fk4tk\" (UniqueName: \"kubernetes.io/projected/2b49de6a-eb13-487d-b779-6cad65bbbc33-kube-api-access-fk4tk\") pod \"machine-config-operator-74547568cd-w7r6v\" (UID: \"2b49de6a-eb13-487d-b779-6cad65bbbc33\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.325603 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/194f460d-e3a1-409f-b6ca-7338f62025c0-srv-cert\") pod \"catalog-operator-68c6474976-l4tvp\" (UID: \"194f460d-e3a1-409f-b6ca-7338f62025c0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.325658 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6520b48-04e3-4b95-8c18-0c51e2e41566-config\") pod \"openshift-apiserver-operator-796bbdcf4f-4c5n9\" (UID: \"a6520b48-04e3-4b95-8c18-0c51e2e41566\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4c5n9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.325683 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/db60e754-3b34-4d6d-8d4c-46384bc04d60-metrics-certs\") pod \"router-default-5444994796-qzs78\" (UID: \"db60e754-3b34-4d6d-8d4c-46384bc04d60\") " pod="openshift-ingress/router-default-5444994796-qzs78" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.325752 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/897e6904-d07b-468a-9eff-504d8ce80db1-etcd-service-ca\") pod \"etcd-operator-b45778765-vt7hb\" (UID: \"897e6904-d07b-468a-9eff-504d8ce80db1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.325806 4703 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.325899 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/de735d2c-8817-415e-b190-df96bf922407-socket-dir\") pod \"csi-hostpathplugin-wsjdp\" (UID: \"de735d2c-8817-415e-b190-df96bf922407\") " pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.325925 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.325948 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-v286d\" (UID: \"66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-v286d" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.325971 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0c06264a-b3b5-4784-9ed9-05fb2c937105-trusted-ca\") pod \"ingress-operator-5b745b69d9-8tl5d\" (UID: \"0c06264a-b3b5-4784-9ed9-05fb2c937105\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.325998 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfhnp\" (UniqueName: \"kubernetes.io/projected/db60e754-3b34-4d6d-8d4c-46384bc04d60-kube-api-access-dfhnp\") pod \"router-default-5444994796-qzs78\" (UID: \"db60e754-3b34-4d6d-8d4c-46384bc04d60\") " pod="openshift-ingress/router-default-5444994796-qzs78" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.326650 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-trusted-ca-bundle\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.326835 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.326936 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zzvln\" (UniqueName: 
\"kubernetes.io/projected/aa4aaaa5-396e-4e62-92a3-74b835af58a7-kube-api-access-zzvln\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.326998 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7h45s\" (UniqueName: \"kubernetes.io/projected/1ac1debf-d20c-4a3b-abec-64c6e317cb69-kube-api-access-7h45s\") pod \"machine-config-controller-84d6567774-x56jp\" (UID: \"1ac1debf-d20c-4a3b-abec-64c6e317cb69\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x56jp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.327097 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gl6z4\" (UniqueName: \"kubernetes.io/projected/61fedc4c-7586-4cdc-b38e-be8a6f3762a1-kube-api-access-gl6z4\") pod \"ingress-canary-5zq9g\" (UID: \"61fedc4c-7586-4cdc-b38e-be8a6f3762a1\") " pod="openshift-ingress-canary/ingress-canary-5zq9g" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.327254 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-audit-policies\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.327281 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2ccbee61-4f6b-4070-ae06-071f1062ad2f-profile-collector-cert\") pod \"olm-operator-6b444d44fb-22qwf\" (UID: \"2ccbee61-4f6b-4070-ae06-071f1062ad2f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.327299 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa4aaaa5-396e-4e62-92a3-74b835af58a7-console-serving-cert\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.327434 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bb27e0bb-49da-4e6c-a7f7-e8addc64b009-serving-cert\") pod \"service-ca-operator-777779d784-v7hlb\" (UID: \"bb27e0bb-49da-4e6c-a7f7-e8addc64b009\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-v7hlb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.327592 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/115e8738-ea93-49f0-a85b-9c59933c940c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-pvx9n\" (UID: \"115e8738-ea93-49f0-a85b-9c59933c940c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pvx9n" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.327770 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-registry-certificates\") pod 
\"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.327783 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1ac1debf-d20c-4a3b-abec-64c6e317cb69-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-x56jp\" (UID: \"1ac1debf-d20c-4a3b-abec-64c6e317cb69\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x56jp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.327954 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba-webhook-cert\") pod \"packageserver-d55dfcdfc-9lxbz\" (UID: \"8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328044 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dq4rv\" (UniqueName: \"kubernetes.io/projected/bb27e0bb-49da-4e6c-a7f7-e8addc64b009-kube-api-access-dq4rv\") pod \"service-ca-operator-777779d784-v7hlb\" (UID: \"bb27e0bb-49da-4e6c-a7f7-e8addc64b009\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-v7hlb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328154 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9b9sr\" (UniqueName: \"kubernetes.io/projected/575b58cd-4c7e-4d84-bf3e-182ebe6232e8-kube-api-access-9b9sr\") pod \"migrator-59844c95c7-b8qs4\" (UID: \"575b58cd-4c7e-4d84-bf3e-182ebe6232e8\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-b8qs4" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328210 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6520b48-04e3-4b95-8c18-0c51e2e41566-config\") pod \"openshift-apiserver-operator-796bbdcf4f-4c5n9\" (UID: \"a6520b48-04e3-4b95-8c18-0c51e2e41566\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4c5n9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328271 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b4ff5527-1a84-4f96-a063-b74f0e220eb9-metrics-tls\") pod \"dns-default-zlb62\" (UID: \"b4ff5527-1a84-4f96-a063-b74f0e220eb9\") " pod="openshift-dns/dns-default-zlb62" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328389 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-registry-tls\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328479 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: 
\"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328540 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/28191393-c4b1-4d80-9994-ca31868c9fb4-trusted-ca-bundle\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328574 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a6520b48-04e3-4b95-8c18-0c51e2e41566-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-4c5n9\" (UID: \"a6520b48-04e3-4b95-8c18-0c51e2e41566\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4c5n9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328610 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328647 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2b49de6a-eb13-487d-b779-6cad65bbbc33-auth-proxy-config\") pod \"machine-config-operator-74547568cd-w7r6v\" (UID: \"2b49de6a-eb13-487d-b779-6cad65bbbc33\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328681 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/28191393-c4b1-4d80-9994-ca31868c9fb4-etcd-serving-ca\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328711 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/897e6904-d07b-468a-9eff-504d8ce80db1-etcd-ca\") pod \"etcd-operator-b45778765-vt7hb\" (UID: \"897e6904-d07b-468a-9eff-504d8ce80db1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328743 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2eaf3e49-2394-495c-926e-5504ff81ccc5-config-volume\") pod \"collect-profiles-29496225-zh786\" (UID: \"2eaf3e49-2394-495c-926e-5504ff81ccc5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328796 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc 
kubenswrapper[4703]: I0130 11:58:47.328825 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-console-config\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328860 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/db60e754-3b34-4d6d-8d4c-46384bc04d60-default-certificate\") pod \"router-default-5444994796-qzs78\" (UID: \"db60e754-3b34-4d6d-8d4c-46384bc04d60\") " pod="openshift-ingress/router-default-5444994796-qzs78" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328898 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328929 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/de735d2c-8817-415e-b190-df96bf922407-csi-data-dir\") pod \"csi-hostpathplugin-wsjdp\" (UID: \"de735d2c-8817-415e-b190-df96bf922407\") " pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328961 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/2b49de6a-eb13-487d-b779-6cad65bbbc33-images\") pod \"machine-config-operator-74547568cd-w7r6v\" (UID: \"2b49de6a-eb13-487d-b779-6cad65bbbc33\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328993 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2ccbee61-4f6b-4070-ae06-071f1062ad2f-srv-cert\") pod \"olm-operator-6b444d44fb-22qwf\" (UID: \"2ccbee61-4f6b-4070-ae06-071f1062ad2f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.329022 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8f5m\" (UniqueName: \"kubernetes.io/projected/0c06264a-b3b5-4784-9ed9-05fb2c937105-kube-api-access-r8f5m\") pod \"ingress-operator-5b745b69d9-8tl5d\" (UID: \"0c06264a-b3b5-4784-9ed9-05fb2c937105\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.329049 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/de735d2c-8817-415e-b190-df96bf922407-mountpoint-dir\") pod \"csi-hostpathplugin-wsjdp\" (UID: \"de735d2c-8817-415e-b190-df96bf922407\") " pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.329073 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/897e6904-d07b-468a-9eff-504d8ce80db1-serving-cert\") pod \"etcd-operator-b45778765-vt7hb\" (UID: \"897e6904-d07b-468a-9eff-504d8ce80db1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.329099 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.329177 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/33681525-5de5-4f9e-a774-809bd0603c5a-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-sn2fc\" (UID: \"33681525-5de5-4f9e-a774-809bd0603c5a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-sn2fc" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.329371 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.328931 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25mx7\" (UniqueName: \"kubernetes.io/projected/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-kube-api-access-25mx7\") pod \"controller-manager-879f6c89f-dv4qg\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.329455 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/28191393-c4b1-4d80-9994-ca31868c9fb4-serving-cert\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.330001 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.330067 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/28191393-c4b1-4d80-9994-ca31868c9fb4-trusted-ca-bundle\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.330199 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/aa4aaaa5-396e-4e62-92a3-74b835af58a7-console-oauth-config\") pod \"console-f9d7485db-mxjx6\" (UID: 
\"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.330314 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/28191393-c4b1-4d80-9994-ca31868c9fb4-etcd-serving-ca\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.330820 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-console-config\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.330821 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.331890 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.332386 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.333238 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.333620 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-registry-tls\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.333730 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.333867 4703 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a6520b48-04e3-4b95-8c18-0c51e2e41566-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-4c5n9\" (UID: \"a6520b48-04e3-4b95-8c18-0c51e2e41566\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4c5n9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.335192 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.340105 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfknw\" (UniqueName: \"kubernetes.io/projected/64f5798c-a6f3-4a7e-9b16-f3039aab5a23-kube-api-access-xfknw\") pod \"console-operator-58897d9998-zwbps\" (UID: \"64f5798c-a6f3-4a7e-9b16-f3039aab5a23\") " pod="openshift-console-operator/console-operator-58897d9998-zwbps" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.345810 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.355477 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g699n\" (UniqueName: \"kubernetes.io/projected/988a730f-bd1f-45b8-97a9-c14bea7d749e-kube-api-access-g699n\") pod \"openshift-config-operator-7777fb866f-g5vsj\" (UID: \"988a730f-bd1f-45b8-97a9-c14bea7d749e\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj" Jan 30 11:58:47 crc kubenswrapper[4703]: W0130 11:58:47.356377 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod68f47bdd_b01b_467d_914f_ef08276b4cc5.slice/crio-f98bb1aafbe0d6ab20ee44b251970361a06bec69c5a8f64db5191f43e014ec4d WatchSource:0}: Error finding container f98bb1aafbe0d6ab20ee44b251970361a06bec69c5a8f64db5191f43e014ec4d: Status 404 returned error can't find the container with id f98bb1aafbe0d6ab20ee44b251970361a06bec69c5a8f64db5191f43e014ec4d Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.357098 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-q7gv4" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.370030 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.378879 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dsl9t\" (UniqueName: \"kubernetes.io/projected/8ff6e057-d092-41f8-908a-7f718f8e7813-kube-api-access-dsl9t\") pod \"machine-api-operator-5694c8668f-x5w7b\" (UID: \"8ff6e057-d092-41f8-908a-7f718f8e7813\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-x5w7b" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.407521 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqvh8\" (UniqueName: \"kubernetes.io/projected/ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3-kube-api-access-nqvh8\") pod \"authentication-operator-69f744f599-xfvl5\" (UID: \"ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.416714 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.428508 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ht7rt\" (UniqueName: \"kubernetes.io/projected/3e848933-041a-4f18-89af-8f369b7eebcc-kube-api-access-ht7rt\") pod \"route-controller-manager-6576b87f9c-x28r8\" (UID: \"3e848933-041a-4f18-89af-8f369b7eebcc\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.430475 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-x5w7b" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.430916 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431096 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b4ff5527-1a84-4f96-a063-b74f0e220eb9-metrics-tls\") pod \"dns-default-zlb62\" (UID: \"b4ff5527-1a84-4f96-a063-b74f0e220eb9\") " pod="openshift-dns/dns-default-zlb62" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431148 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2b49de6a-eb13-487d-b779-6cad65bbbc33-auth-proxy-config\") pod \"machine-config-operator-74547568cd-w7r6v\" (UID: \"2b49de6a-eb13-487d-b779-6cad65bbbc33\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431167 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/897e6904-d07b-468a-9eff-504d8ce80db1-etcd-ca\") pod \"etcd-operator-b45778765-vt7hb\" (UID: \"897e6904-d07b-468a-9eff-504d8ce80db1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431182 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
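Two other signals are interleaved with the mount traffic above. The util.go:30 entries ("No sandbox for pod can be found. Need to start a new one") mark pods whose sandboxes the kubelet is about to create, and the single W-level manager.go:1169 entry records cAdvisor failing to inspect a CRI-O container named by a cgroup watch event, typically a benign create/inspect race: the 404 only means the runtime could not resolve that container id at query time. Severity is the first letter of each klog entry (I, W, E, F), so a quick health read of a window like this is a severity tally. A small sketch, assuming only the klog-in-journald text format shown here; the regex is mine:

// severity_count.go: not kubelet code; tallies klog entries by severity.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Entries look like `kubenswrapper[4703]: I0130 11:58:47...`, where the
// letter before the date encodes severity.
var sevRe = regexp.MustCompile(`kubenswrapper\[\d+\]: ([IWEF])\d{4} `)

func main() {
	counts := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 1<<20), 1<<20)
	for sc.Scan() {
		// A flattened line can hold several entries; count them all.
		for _, m := range sevRe.FindAllStringSubmatch(sc.Text(), -1) {
			counts[m[1]]++
		}
	}
	fmt.Printf("I=%d W=%d E=%d F=%d\n", counts["I"], counts["W"], counts["E"], counts["F"])
}

On this window it would report almost entirely I entries, the one W above, and the one E that follows below.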
\"config-volume\" (UniqueName: \"kubernetes.io/configmap/2eaf3e49-2394-495c-926e-5504ff81ccc5-config-volume\") pod \"collect-profiles-29496225-zh786\" (UID: \"2eaf3e49-2394-495c-926e-5504ff81ccc5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786" Jan 30 11:58:47 crc kubenswrapper[4703]: E0130 11:58:47.431208 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:47.931187231 +0000 UTC m=+163.709008885 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431239 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/db60e754-3b34-4d6d-8d4c-46384bc04d60-default-certificate\") pod \"router-default-5444994796-qzs78\" (UID: \"db60e754-3b34-4d6d-8d4c-46384bc04d60\") " pod="openshift-ingress/router-default-5444994796-qzs78" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431276 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2ccbee61-4f6b-4070-ae06-071f1062ad2f-srv-cert\") pod \"olm-operator-6b444d44fb-22qwf\" (UID: \"2ccbee61-4f6b-4070-ae06-071f1062ad2f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431303 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/de735d2c-8817-415e-b190-df96bf922407-csi-data-dir\") pod \"csi-hostpathplugin-wsjdp\" (UID: \"de735d2c-8817-415e-b190-df96bf922407\") " pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431322 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/2b49de6a-eb13-487d-b779-6cad65bbbc33-images\") pod \"machine-config-operator-74547568cd-w7r6v\" (UID: \"2b49de6a-eb13-487d-b779-6cad65bbbc33\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431340 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8f5m\" (UniqueName: \"kubernetes.io/projected/0c06264a-b3b5-4784-9ed9-05fb2c937105-kube-api-access-r8f5m\") pod \"ingress-operator-5b745b69d9-8tl5d\" (UID: \"0c06264a-b3b5-4784-9ed9-05fb2c937105\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431358 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/de735d2c-8817-415e-b190-df96bf922407-mountpoint-dir\") pod \"csi-hostpathplugin-wsjdp\" (UID: \"de735d2c-8817-415e-b190-df96bf922407\") " 
pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431375 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/897e6904-d07b-468a-9eff-504d8ce80db1-serving-cert\") pod \"etcd-operator-b45778765-vt7hb\" (UID: \"897e6904-d07b-468a-9eff-504d8ce80db1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431394 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/33681525-5de5-4f9e-a774-809bd0603c5a-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-sn2fc\" (UID: \"33681525-5de5-4f9e-a774-809bd0603c5a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-sn2fc" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431410 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0c06264a-b3b5-4784-9ed9-05fb2c937105-metrics-tls\") pod \"ingress-operator-5b745b69d9-8tl5d\" (UID: \"0c06264a-b3b5-4784-9ed9-05fb2c937105\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431443 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92rr9\" (UniqueName: \"kubernetes.io/projected/2eaf3e49-2394-495c-926e-5504ff81ccc5-kube-api-access-92rr9\") pod \"collect-profiles-29496225-zh786\" (UID: \"2eaf3e49-2394-495c-926e-5504ff81ccc5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431459 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/de735d2c-8817-415e-b190-df96bf922407-plugins-dir\") pod \"csi-hostpathplugin-wsjdp\" (UID: \"de735d2c-8817-415e-b190-df96bf922407\") " pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431478 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45c9n\" (UniqueName: \"kubernetes.io/projected/e0ce7c98-966a-4d09-b401-f6cb42c1b08e-kube-api-access-45c9n\") pod \"dns-operator-744455d44c-jn8hp\" (UID: \"e0ce7c98-966a-4d09-b401-f6cb42c1b08e\") " pod="openshift-dns-operator/dns-operator-744455d44c-jn8hp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431494 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jkxg\" (UniqueName: \"kubernetes.io/projected/8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba-kube-api-access-6jkxg\") pod \"packageserver-d55dfcdfc-9lxbz\" (UID: \"8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431523 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4hj9\" (UniqueName: \"kubernetes.io/projected/de735d2c-8817-415e-b190-df96bf922407-kube-api-access-w4hj9\") pod \"csi-hostpathplugin-wsjdp\" (UID: \"de735d2c-8817-415e-b190-df96bf922407\") " pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431547 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"proxy-tls\" (UniqueName: \"kubernetes.io/secret/2b49de6a-eb13-487d-b779-6cad65bbbc33-proxy-tls\") pod \"machine-config-operator-74547568cd-w7r6v\" (UID: \"2b49de6a-eb13-487d-b779-6cad65bbbc33\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431565 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/897e6904-d07b-468a-9eff-504d8ce80db1-config\") pod \"etcd-operator-b45778765-vt7hb\" (UID: \"897e6904-d07b-468a-9eff-504d8ce80db1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431603 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba-tmpfs\") pod \"packageserver-d55dfcdfc-9lxbz\" (UID: \"8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431619 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/26730f5b-96f6-40f6-ab66-0500a306f988-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wd7g9\" (UID: \"26730f5b-96f6-40f6-ab66-0500a306f988\") " pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431669 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/897e6904-d07b-468a-9eff-504d8ce80db1-etcd-client\") pod \"etcd-operator-b45778765-vt7hb\" (UID: \"897e6904-d07b-468a-9eff-504d8ce80db1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431692 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/115e8738-ea93-49f0-a85b-9c59933c940c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-pvx9n\" (UID: \"115e8738-ea93-49f0-a85b-9c59933c940c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pvx9n" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431708 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47544af6-cdac-444e-9c16-37ded5e11e28-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-sxv85\" (UID: \"47544af6-cdac-444e-9c16-37ded5e11e28\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxv85" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431787 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t429q\" (UniqueName: \"kubernetes.io/projected/194f460d-e3a1-409f-b6ca-7338f62025c0-kube-api-access-t429q\") pod \"catalog-operator-68c6474976-l4tvp\" (UID: \"194f460d-e3a1-409f-b6ca-7338f62025c0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431832 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba-apiservice-cert\") pod \"packageserver-d55dfcdfc-9lxbz\" (UID: 
\"8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431873 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b887r\" (UniqueName: \"kubernetes.io/projected/40e464c0-96c5-4c69-8537-ef29b93319ab-kube-api-access-b887r\") pod \"control-plane-machine-set-operator-78cbb6b69f-92h6w\" (UID: \"40e464c0-96c5-4c69-8537-ef29b93319ab\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-92h6w" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431894 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcjr8\" (UniqueName: \"kubernetes.io/projected/26730f5b-96f6-40f6-ab66-0500a306f988-kube-api-access-rcjr8\") pod \"marketplace-operator-79b997595-wd7g9\" (UID: \"26730f5b-96f6-40f6-ab66-0500a306f988\") " pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431914 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2rhr\" (UniqueName: \"kubernetes.io/projected/8a7284fa-8f0a-461c-9753-4636c4f6e3f0-kube-api-access-l2rhr\") pod \"service-ca-9c57cc56f-9pqdt\" (UID: \"8a7284fa-8f0a-461c-9753-4636c4f6e3f0\") " pod="openshift-service-ca/service-ca-9c57cc56f-9pqdt" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431932 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/194f460d-e3a1-409f-b6ca-7338f62025c0-profile-collector-cert\") pod \"catalog-operator-68c6474976-l4tvp\" (UID: \"194f460d-e3a1-409f-b6ca-7338f62025c0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431948 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2eaf3e49-2394-495c-926e-5504ff81ccc5-secret-volume\") pod \"collect-profiles-29496225-zh786\" (UID: \"2eaf3e49-2394-495c-926e-5504ff81ccc5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.431965 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/47544af6-cdac-444e-9c16-37ded5e11e28-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-sxv85\" (UID: \"47544af6-cdac-444e-9c16-37ded5e11e28\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxv85" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432002 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3164cd87-2399-4b8f-8314-f663f2fc0b52-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-4xxv7\" (UID: \"3164cd87-2399-4b8f-8314-f663f2fc0b52\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4xxv7" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432007 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2eaf3e49-2394-495c-926e-5504ff81ccc5-config-volume\") pod \"collect-profiles-29496225-zh786\" (UID: \"2eaf3e49-2394-495c-926e-5504ff81ccc5\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432033 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb-config\") pod \"kube-apiserver-operator-766d6c64bb-v286d\" (UID: \"66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-v286d" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432050 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/db60e754-3b34-4d6d-8d4c-46384bc04d60-stats-auth\") pod \"router-default-5444994796-qzs78\" (UID: \"db60e754-3b34-4d6d-8d4c-46384bc04d60\") " pod="openshift-ingress/router-default-5444994796-qzs78" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432071 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/47544af6-cdac-444e-9c16-37ded5e11e28-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-sxv85\" (UID: \"47544af6-cdac-444e-9c16-37ded5e11e28\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxv85" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432096 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/de735d2c-8817-415e-b190-df96bf922407-registration-dir\") pod \"csi-hostpathplugin-wsjdp\" (UID: \"de735d2c-8817-415e-b190-df96bf922407\") " pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432138 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0c06264a-b3b5-4784-9ed9-05fb2c937105-bound-sa-token\") pod \"ingress-operator-5b745b69d9-8tl5d\" (UID: \"0c06264a-b3b5-4784-9ed9-05fb2c937105\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432156 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b4ff5527-1a84-4f96-a063-b74f0e220eb9-config-volume\") pod \"dns-default-zlb62\" (UID: \"b4ff5527-1a84-4f96-a063-b74f0e220eb9\") " pod="openshift-dns/dns-default-zlb62" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432182 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432207 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/8a7284fa-8f0a-461c-9753-4636c4f6e3f0-signing-key\") pod \"service-ca-9c57cc56f-9pqdt\" (UID: \"8a7284fa-8f0a-461c-9753-4636c4f6e3f0\") " pod="openshift-service-ca/service-ca-9c57cc56f-9pqdt" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432224 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/26730f5b-96f6-40f6-ab66-0500a306f988-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wd7g9\" (UID: \"26730f5b-96f6-40f6-ab66-0500a306f988\") " pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432246 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsh89\" (UniqueName: \"kubernetes.io/projected/b4ff5527-1a84-4f96-a063-b74f0e220eb9-kube-api-access-jsh89\") pod \"dns-default-zlb62\" (UID: \"b4ff5527-1a84-4f96-a063-b74f0e220eb9\") " pod="openshift-dns/dns-default-zlb62" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432264 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cbbtl\" (UniqueName: \"kubernetes.io/projected/c271501b-03fc-4c15-b1d7-e705cbcf16eb-kube-api-access-cbbtl\") pod \"multus-admission-controller-857f4d67dd-xmjbx\" (UID: \"c271501b-03fc-4c15-b1d7-e705cbcf16eb\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-xmjbx" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432280 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb27e0bb-49da-4e6c-a7f7-e8addc64b009-config\") pod \"service-ca-operator-777779d784-v7hlb\" (UID: \"bb27e0bb-49da-4e6c-a7f7-e8addc64b009\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-v7hlb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432297 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/146be6e7-a4cb-40c5-bda3-1e9b3c757118-certs\") pod \"machine-config-server-n5kz2\" (UID: \"146be6e7-a4cb-40c5-bda3-1e9b3c757118\") " pod="openshift-machine-config-operator/machine-config-server-n5kz2" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432311 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvzdx\" (UniqueName: \"kubernetes.io/projected/2ccbee61-4f6b-4070-ae06-071f1062ad2f-kube-api-access-wvzdx\") pod \"olm-operator-6b444d44fb-22qwf\" (UID: \"2ccbee61-4f6b-4070-ae06-071f1062ad2f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432328 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/61fedc4c-7586-4cdc-b38e-be8a6f3762a1-cert\") pod \"ingress-canary-5zq9g\" (UID: \"61fedc4c-7586-4cdc-b38e-be8a6f3762a1\") " pod="openshift-ingress-canary/ingress-canary-5zq9g" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432348 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/40e464c0-96c5-4c69-8537-ef29b93319ab-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-92h6w\" (UID: \"40e464c0-96c5-4c69-8537-ef29b93319ab\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-92h6w" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432367 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/146be6e7-a4cb-40c5-bda3-1e9b3c757118-node-bootstrap-token\") pod \"machine-config-server-n5kz2\" (UID: \"146be6e7-a4cb-40c5-bda3-1e9b3c757118\") " 
pod="openshift-machine-config-operator/machine-config-server-n5kz2" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432382 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1ac1debf-d20c-4a3b-abec-64c6e317cb69-proxy-tls\") pod \"machine-config-controller-84d6567774-x56jp\" (UID: \"1ac1debf-d20c-4a3b-abec-64c6e317cb69\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x56jp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432398 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97rtk\" (UniqueName: \"kubernetes.io/projected/3164cd87-2399-4b8f-8314-f663f2fc0b52-kube-api-access-97rtk\") pod \"package-server-manager-789f6589d5-4xxv7\" (UID: \"3164cd87-2399-4b8f-8314-f663f2fc0b52\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4xxv7" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432423 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e0ce7c98-966a-4d09-b401-f6cb42c1b08e-metrics-tls\") pod \"dns-operator-744455d44c-jn8hp\" (UID: \"e0ce7c98-966a-4d09-b401-f6cb42c1b08e\") " pod="openshift-dns-operator/dns-operator-744455d44c-jn8hp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432440 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbspk\" (UniqueName: \"kubernetes.io/projected/897e6904-d07b-468a-9eff-504d8ce80db1-kube-api-access-sbspk\") pod \"etcd-operator-b45778765-vt7hb\" (UID: \"897e6904-d07b-468a-9eff-504d8ce80db1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432454 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-v286d\" (UID: \"66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-v286d" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432469 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33681525-5de5-4f9e-a774-809bd0603c5a-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-sn2fc\" (UID: \"33681525-5de5-4f9e-a774-809bd0603c5a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-sn2fc" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432483 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c271501b-03fc-4c15-b1d7-e705cbcf16eb-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-xmjbx\" (UID: \"c271501b-03fc-4c15-b1d7-e705cbcf16eb\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-xmjbx" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432499 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/8a7284fa-8f0a-461c-9753-4636c4f6e3f0-signing-cabundle\") pod \"service-ca-9c57cc56f-9pqdt\" (UID: \"8a7284fa-8f0a-461c-9753-4636c4f6e3f0\") " pod="openshift-service-ca/service-ca-9c57cc56f-9pqdt" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432515 4703 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db60e754-3b34-4d6d-8d4c-46384bc04d60-service-ca-bundle\") pod \"router-default-5444994796-qzs78\" (UID: \"db60e754-3b34-4d6d-8d4c-46384bc04d60\") " pod="openshift-ingress/router-default-5444994796-qzs78" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432531 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrdcs\" (UniqueName: \"kubernetes.io/projected/33681525-5de5-4f9e-a774-809bd0603c5a-kube-api-access-vrdcs\") pod \"kube-storage-version-migrator-operator-b67b599dd-sn2fc\" (UID: \"33681525-5de5-4f9e-a774-809bd0603c5a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-sn2fc" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432548 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/115e8738-ea93-49f0-a85b-9c59933c940c-config\") pod \"kube-controller-manager-operator-78b949d7b-pvx9n\" (UID: \"115e8738-ea93-49f0-a85b-9c59933c940c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pvx9n" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432565 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/194f460d-e3a1-409f-b6ca-7338f62025c0-srv-cert\") pod \"catalog-operator-68c6474976-l4tvp\" (UID: \"194f460d-e3a1-409f-b6ca-7338f62025c0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432580 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d677h\" (UniqueName: \"kubernetes.io/projected/146be6e7-a4cb-40c5-bda3-1e9b3c757118-kube-api-access-d677h\") pod \"machine-config-server-n5kz2\" (UID: \"146be6e7-a4cb-40c5-bda3-1e9b3c757118\") " pod="openshift-machine-config-operator/machine-config-server-n5kz2" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432584 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2b49de6a-eb13-487d-b779-6cad65bbbc33-auth-proxy-config\") pod \"machine-config-operator-74547568cd-w7r6v\" (UID: \"2b49de6a-eb13-487d-b779-6cad65bbbc33\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432596 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fk4tk\" (UniqueName: \"kubernetes.io/projected/2b49de6a-eb13-487d-b779-6cad65bbbc33-kube-api-access-fk4tk\") pod \"machine-config-operator-74547568cd-w7r6v\" (UID: \"2b49de6a-eb13-487d-b779-6cad65bbbc33\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432618 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/db60e754-3b34-4d6d-8d4c-46384bc04d60-metrics-certs\") pod \"router-default-5444994796-qzs78\" (UID: \"db60e754-3b34-4d6d-8d4c-46384bc04d60\") " pod="openshift-ingress/router-default-5444994796-qzs78" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432641 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"socket-dir\" (UniqueName: \"kubernetes.io/host-path/de735d2c-8817-415e-b190-df96bf922407-socket-dir\") pod \"csi-hostpathplugin-wsjdp\" (UID: \"de735d2c-8817-415e-b190-df96bf922407\") " pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432656 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/897e6904-d07b-468a-9eff-504d8ce80db1-etcd-service-ca\") pod \"etcd-operator-b45778765-vt7hb\" (UID: \"897e6904-d07b-468a-9eff-504d8ce80db1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432677 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-v286d\" (UID: \"66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-v286d" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432698 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0c06264a-b3b5-4784-9ed9-05fb2c937105-trusted-ca\") pod \"ingress-operator-5b745b69d9-8tl5d\" (UID: \"0c06264a-b3b5-4784-9ed9-05fb2c937105\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432720 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfhnp\" (UniqueName: \"kubernetes.io/projected/db60e754-3b34-4d6d-8d4c-46384bc04d60-kube-api-access-dfhnp\") pod \"router-default-5444994796-qzs78\" (UID: \"db60e754-3b34-4d6d-8d4c-46384bc04d60\") " pod="openshift-ingress/router-default-5444994796-qzs78" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432749 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7h45s\" (UniqueName: \"kubernetes.io/projected/1ac1debf-d20c-4a3b-abec-64c6e317cb69-kube-api-access-7h45s\") pod \"machine-config-controller-84d6567774-x56jp\" (UID: \"1ac1debf-d20c-4a3b-abec-64c6e317cb69\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x56jp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432768 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gl6z4\" (UniqueName: \"kubernetes.io/projected/61fedc4c-7586-4cdc-b38e-be8a6f3762a1-kube-api-access-gl6z4\") pod \"ingress-canary-5zq9g\" (UID: \"61fedc4c-7586-4cdc-b38e-be8a6f3762a1\") " pod="openshift-ingress-canary/ingress-canary-5zq9g" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432798 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2ccbee61-4f6b-4070-ae06-071f1062ad2f-profile-collector-cert\") pod \"olm-operator-6b444d44fb-22qwf\" (UID: \"2ccbee61-4f6b-4070-ae06-071f1062ad2f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432818 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bb27e0bb-49da-4e6c-a7f7-e8addc64b009-serving-cert\") pod \"service-ca-operator-777779d784-v7hlb\" (UID: \"bb27e0bb-49da-4e6c-a7f7-e8addc64b009\") " 
pod="openshift-service-ca-operator/service-ca-operator-777779d784-v7hlb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432844 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/115e8738-ea93-49f0-a85b-9c59933c940c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-pvx9n\" (UID: \"115e8738-ea93-49f0-a85b-9c59933c940c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pvx9n" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432878 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1ac1debf-d20c-4a3b-abec-64c6e317cb69-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-x56jp\" (UID: \"1ac1debf-d20c-4a3b-abec-64c6e317cb69\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x56jp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432899 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba-webhook-cert\") pod \"packageserver-d55dfcdfc-9lxbz\" (UID: \"8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432915 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dq4rv\" (UniqueName: \"kubernetes.io/projected/bb27e0bb-49da-4e6c-a7f7-e8addc64b009-kube-api-access-dq4rv\") pod \"service-ca-operator-777779d784-v7hlb\" (UID: \"bb27e0bb-49da-4e6c-a7f7-e8addc64b009\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-v7hlb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.432932 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9b9sr\" (UniqueName: \"kubernetes.io/projected/575b58cd-4c7e-4d84-bf3e-182ebe6232e8-kube-api-access-9b9sr\") pod \"migrator-59844c95c7-b8qs4\" (UID: \"575b58cd-4c7e-4d84-bf3e-182ebe6232e8\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-b8qs4" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.433098 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/897e6904-d07b-468a-9eff-504d8ce80db1-etcd-ca\") pod \"etcd-operator-b45778765-vt7hb\" (UID: \"897e6904-d07b-468a-9eff-504d8ce80db1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.436509 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/26730f5b-96f6-40f6-ab66-0500a306f988-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wd7g9\" (UID: \"26730f5b-96f6-40f6-ab66-0500a306f988\") " pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.437185 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bb27e0bb-49da-4e6c-a7f7-e8addc64b009-config\") pod \"service-ca-operator-777779d784-v7hlb\" (UID: \"bb27e0bb-49da-4e6c-a7f7-e8addc64b009\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-v7hlb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.437519 4703 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1ac1debf-d20c-4a3b-abec-64c6e317cb69-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-x56jp\" (UID: \"1ac1debf-d20c-4a3b-abec-64c6e317cb69\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x56jp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.438065 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/8a7284fa-8f0a-461c-9753-4636c4f6e3f0-signing-key\") pod \"service-ca-9c57cc56f-9pqdt\" (UID: \"8a7284fa-8f0a-461c-9753-4636c4f6e3f0\") " pod="openshift-service-ca/service-ca-9c57cc56f-9pqdt" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.438722 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/47544af6-cdac-444e-9c16-37ded5e11e28-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-sxv85\" (UID: \"47544af6-cdac-444e-9c16-37ded5e11e28\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxv85" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.439733 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/115e8738-ea93-49f0-a85b-9c59933c940c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-pvx9n\" (UID: \"115e8738-ea93-49f0-a85b-9c59933c940c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pvx9n" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.440944 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/de735d2c-8817-415e-b190-df96bf922407-plugins-dir\") pod \"csi-hostpathplugin-wsjdp\" (UID: \"de735d2c-8817-415e-b190-df96bf922407\") " pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.441066 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/de735d2c-8817-415e-b190-df96bf922407-csi-data-dir\") pod \"csi-hostpathplugin-wsjdp\" (UID: \"de735d2c-8817-415e-b190-df96bf922407\") " pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.496605 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.497774 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/db60e754-3b34-4d6d-8d4c-46384bc04d60-default-certificate\") pod \"router-default-5444994796-qzs78\" (UID: \"db60e754-3b34-4d6d-8d4c-46384bc04d60\") " pod="openshift-ingress/router-default-5444994796-qzs78" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.497860 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/2ccbee61-4f6b-4070-ae06-071f1062ad2f-srv-cert\") pod \"olm-operator-6b444d44fb-22qwf\" (UID: \"2ccbee61-4f6b-4070-ae06-071f1062ad2f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.498343 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0c06264a-b3b5-4784-9ed9-05fb2c937105-trusted-ca\") pod \"ingress-operator-5b745b69d9-8tl5d\" (UID: \"0c06264a-b3b5-4784-9ed9-05fb2c937105\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.498570 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.498808 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/40e464c0-96c5-4c69-8537-ef29b93319ab-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-92h6w\" (UID: \"40e464c0-96c5-4c69-8537-ef29b93319ab\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-92h6w" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.498813 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sw576\" (UniqueName: \"kubernetes.io/projected/923a63f2-7c6f-4c55-a80f-ecf12184d88f-kube-api-access-sw576\") pod \"cluster-samples-operator-665b6dd947-8qwk4\" (UID: \"923a63f2-7c6f-4c55-a80f-ecf12184d88f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qwk4" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.498926 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.499176 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba-webhook-cert\") pod \"packageserver-d55dfcdfc-9lxbz\" (UID: \"8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.499457 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.499784 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/2b49de6a-eb13-487d-b779-6cad65bbbc33-images\") pod \"machine-config-operator-74547568cd-w7r6v\" (UID: \"2b49de6a-eb13-487d-b779-6cad65bbbc33\") " 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.499851 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/897e6904-d07b-468a-9eff-504d8ce80db1-etcd-service-ca\") pod \"etcd-operator-b45778765-vt7hb\" (UID: \"897e6904-d07b-468a-9eff-504d8ce80db1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.500213 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/115e8738-ea93-49f0-a85b-9c59933c940c-config\") pod \"kube-controller-manager-operator-78b949d7b-pvx9n\" (UID: \"115e8738-ea93-49f0-a85b-9c59933c940c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pvx9n" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.500524 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bb27e0bb-49da-4e6c-a7f7-e8addc64b009-serving-cert\") pod \"service-ca-operator-777779d784-v7hlb\" (UID: \"bb27e0bb-49da-4e6c-a7f7-e8addc64b009\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-v7hlb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.500679 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/de735d2c-8817-415e-b190-df96bf922407-socket-dir\") pod \"csi-hostpathplugin-wsjdp\" (UID: \"de735d2c-8817-415e-b190-df96bf922407\") " pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.500811 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/2ccbee61-4f6b-4070-ae06-071f1062ad2f-profile-collector-cert\") pod \"olm-operator-6b444d44fb-22qwf\" (UID: \"2ccbee61-4f6b-4070-ae06-071f1062ad2f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.500986 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb-config\") pod \"kube-apiserver-operator-766d6c64bb-v286d\" (UID: \"66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-v286d" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.501619 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/8a7284fa-8f0a-461c-9753-4636c4f6e3f0-signing-cabundle\") pod \"service-ca-9c57cc56f-9pqdt\" (UID: \"8a7284fa-8f0a-461c-9753-4636c4f6e3f0\") " pod="openshift-service-ca/service-ca-9c57cc56f-9pqdt" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.501740 4703 request.go:700] Waited for 1.909242432s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-canary/configmaps?fieldSelector=metadata.name%3Dopenshift-service-ca.crt&limit=500&resourceVersion=0 Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.501888 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1ac1debf-d20c-4a3b-abec-64c6e317cb69-proxy-tls\") pod 
\"machine-config-controller-84d6567774-x56jp\" (UID: \"1ac1debf-d20c-4a3b-abec-64c6e317cb69\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x56jp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.501807 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/db60e754-3b34-4d6d-8d4c-46384bc04d60-service-ca-bundle\") pod \"router-default-5444994796-qzs78\" (UID: \"db60e754-3b34-4d6d-8d4c-46384bc04d60\") " pod="openshift-ingress/router-default-5444994796-qzs78" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.502098 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/de735d2c-8817-415e-b190-df96bf922407-registration-dir\") pod \"csi-hostpathplugin-wsjdp\" (UID: \"de735d2c-8817-415e-b190-df96bf922407\") " pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.502690 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba-apiservice-cert\") pod \"packageserver-d55dfcdfc-9lxbz\" (UID: \"8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.503355 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.503797 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/194f460d-e3a1-409f-b6ca-7338f62025c0-srv-cert\") pod \"catalog-operator-68c6474976-l4tvp\" (UID: \"194f460d-e3a1-409f-b6ca-7338f62025c0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.503862 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/de735d2c-8817-415e-b190-df96bf922407-mountpoint-dir\") pod \"csi-hostpathplugin-wsjdp\" (UID: \"de735d2c-8817-415e-b190-df96bf922407\") " pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:47 crc kubenswrapper[4703]: E0130 11:58:47.504277 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:48.004261616 +0000 UTC m=+163.782083360 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.504288 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba-tmpfs\") pod \"packageserver-d55dfcdfc-9lxbz\" (UID: \"8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.504790 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/897e6904-d07b-468a-9eff-504d8ce80db1-serving-cert\") pod \"etcd-operator-b45778765-vt7hb\" (UID: \"897e6904-d07b-468a-9eff-504d8ce80db1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.504919 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/33681525-5de5-4f9e-a774-809bd0603c5a-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-sn2fc\" (UID: \"33681525-5de5-4f9e-a774-809bd0603c5a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-sn2fc" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.505868 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/897e6904-d07b-468a-9eff-504d8ce80db1-config\") pod \"etcd-operator-b45778765-vt7hb\" (UID: \"897e6904-d07b-468a-9eff-504d8ce80db1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.506116 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/2b49de6a-eb13-487d-b779-6cad65bbbc33-proxy-tls\") pod \"machine-config-operator-74547568cd-w7r6v\" (UID: \"2b49de6a-eb13-487d-b779-6cad65bbbc33\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.506408 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-v286d\" (UID: \"66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-v286d" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.506566 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/47544af6-cdac-444e-9c16-37ded5e11e28-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-sxv85\" (UID: \"47544af6-cdac-444e-9c16-37ded5e11e28\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxv85" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.507013 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: 
\"kubernetes.io/secret/c271501b-03fc-4c15-b1d7-e705cbcf16eb-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-xmjbx\" (UID: \"c271501b-03fc-4c15-b1d7-e705cbcf16eb\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-xmjbx" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.507431 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/26730f5b-96f6-40f6-ab66-0500a306f988-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wd7g9\" (UID: \"26730f5b-96f6-40f6-ab66-0500a306f988\") " pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.508370 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/897e6904-d07b-468a-9eff-504d8ce80db1-etcd-client\") pod \"etcd-operator-b45778765-vt7hb\" (UID: \"897e6904-d07b-468a-9eff-504d8ce80db1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.509690 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/61fedc4c-7586-4cdc-b38e-be8a6f3762a1-cert\") pod \"ingress-canary-5zq9g\" (UID: \"61fedc4c-7586-4cdc-b38e-be8a6f3762a1\") " pod="openshift-ingress-canary/ingress-canary-5zq9g" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.511153 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0c06264a-b3b5-4784-9ed9-05fb2c937105-metrics-tls\") pod \"ingress-operator-5b745b69d9-8tl5d\" (UID: \"0c06264a-b3b5-4784-9ed9-05fb2c937105\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.511456 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/db60e754-3b34-4d6d-8d4c-46384bc04d60-metrics-certs\") pod \"router-default-5444994796-qzs78\" (UID: \"db60e754-3b34-4d6d-8d4c-46384bc04d60\") " pod="openshift-ingress/router-default-5444994796-qzs78" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.511613 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/33681525-5de5-4f9e-a774-809bd0603c5a-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-sn2fc\" (UID: \"33681525-5de5-4f9e-a774-809bd0603c5a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-sn2fc" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.511732 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3164cd87-2399-4b8f-8314-f663f2fc0b52-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-4xxv7\" (UID: \"3164cd87-2399-4b8f-8314-f663f2fc0b52\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4xxv7" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.512078 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e0ce7c98-966a-4d09-b401-f6cb42c1b08e-metrics-tls\") pod \"dns-operator-744455d44c-jn8hp\" (UID: \"e0ce7c98-966a-4d09-b401-f6cb42c1b08e\") " pod="openshift-dns-operator/dns-operator-744455d44c-jn8hp" Jan 30 11:58:47 crc 
kubenswrapper[4703]: I0130 11:58:47.512135 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/db60e754-3b34-4d6d-8d4c-46384bc04d60-stats-auth\") pod \"router-default-5444994796-qzs78\" (UID: \"db60e754-3b34-4d6d-8d4c-46384bc04d60\") " pod="openshift-ingress/router-default-5444994796-qzs78" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.512378 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/194f460d-e3a1-409f-b6ca-7338f62025c0-profile-collector-cert\") pod \"catalog-operator-68c6474976-l4tvp\" (UID: \"194f460d-e3a1-409f-b6ca-7338f62025c0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.513147 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t" event={"ID":"68f47bdd-b01b-467d-914f-ef08276b4cc5","Type":"ContainerStarted","Data":"f98bb1aafbe0d6ab20ee44b251970361a06bec69c5a8f64db5191f43e014ec4d"} Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.516396 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2eaf3e49-2394-495c-926e-5504ff81ccc5-secret-volume\") pod \"collect-profiles-29496225-zh786\" (UID: \"2eaf3e49-2394-495c-926e-5504ff81ccc5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.521302 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.534307 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:47 crc kubenswrapper[4703]: E0130 11:58:47.534523 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:48.034493756 +0000 UTC m=+163.812315410 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.535050 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:47 crc kubenswrapper[4703]: E0130 11:58:47.535390 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:48.03537837 +0000 UTC m=+163.813200024 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.548049 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.556627 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.561801 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.564265 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-zwbps" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.565272 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/b4ff5527-1a84-4f96-a063-b74f0e220eb9-metrics-tls\") pod \"dns-default-zlb62\" (UID: \"b4ff5527-1a84-4f96-a063-b74f0e220eb9\") " pod="openshift-dns/dns-default-zlb62" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.573484 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b4ff5527-1a84-4f96-a063-b74f0e220eb9-config-volume\") pod \"dns-default-zlb62\" (UID: \"b4ff5527-1a84-4f96-a063-b74f0e220eb9\") " pod="openshift-dns/dns-default-zlb62" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.582612 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.582645 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.597696 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bth9p" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.602485 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.621942 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.636285 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qwk4" Jan 30 11:58:47 crc kubenswrapper[4703]: I0130 11:58:47.636369 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:47 crc kubenswrapper[4703]: E0130 11:58:47.636991 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:48.136973747 +0000 UTC m=+163.914795401 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.055751 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.071408 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9nq8\" (UniqueName: \"kubernetes.io/projected/a6520b48-04e3-4b95-8c18-0c51e2e41566-kube-api-access-q9nq8\") pod \"openshift-apiserver-operator-796bbdcf4f-4c5n9\" (UID: \"a6520b48-04e3-4b95-8c18-0c51e2e41566\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4c5n9" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.073160 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/146be6e7-a4cb-40c5-bda3-1e9b3c757118-node-bootstrap-token\") pod \"machine-config-server-n5kz2\" (UID: \"146be6e7-a4cb-40c5-bda3-1e9b3c757118\") " pod="openshift-machine-config-operator/machine-config-server-n5kz2" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.075042 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmfk2\" (UniqueName: \"kubernetes.io/projected/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-kube-api-access-hmfk2\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.075918 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4spt\" (UniqueName: \"kubernetes.io/projected/28191393-c4b1-4d80-9994-ca31868c9fb4-kube-api-access-k4spt\") pod \"apiserver-76f77b778f-gkllf\" (UID: \"28191393-c4b1-4d80-9994-ca31868c9fb4\") " pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:48 crc kubenswrapper[4703]: E0130 11:58:48.075955 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:48.575937885 +0000 UTC m=+164.353759539 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.076767 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4c5n9" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.077428 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/146be6e7-a4cb-40c5-bda3-1e9b3c757118-certs\") pod \"machine-config-server-n5kz2\" (UID: \"146be6e7-a4cb-40c5-bda3-1e9b3c757118\") " pod="openshift-machine-config-operator/machine-config-server-n5kz2" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.107074 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/115e8738-ea93-49f0-a85b-9c59933c940c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-pvx9n\" (UID: \"115e8738-ea93-49f0-a85b-9c59933c940c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pvx9n" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.107670 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-bound-sa-token\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.116411 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrdcs\" (UniqueName: \"kubernetes.io/projected/33681525-5de5-4f9e-a774-809bd0603c5a-kube-api-access-vrdcs\") pod \"kube-storage-version-migrator-operator-b67b599dd-sn2fc\" (UID: \"33681525-5de5-4f9e-a774-809bd0603c5a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-sn2fc" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.119284 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gl6z4\" (UniqueName: \"kubernetes.io/projected/61fedc4c-7586-4cdc-b38e-be8a6f3762a1-kube-api-access-gl6z4\") pod \"ingress-canary-5zq9g\" (UID: \"61fedc4c-7586-4cdc-b38e-be8a6f3762a1\") " pod="openshift-ingress-canary/ingress-canary-5zq9g" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.120323 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fk4tk\" (UniqueName: \"kubernetes.io/projected/2b49de6a-eb13-487d-b779-6cad65bbbc33-kube-api-access-fk4tk\") pod \"machine-config-operator-74547568cd-w7r6v\" (UID: \"2b49de6a-eb13-487d-b779-6cad65bbbc33\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.122247 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7h45s\" (UniqueName: \"kubernetes.io/projected/1ac1debf-d20c-4a3b-abec-64c6e317cb69-kube-api-access-7h45s\") pod \"machine-config-controller-84d6567774-x56jp\" (UID: \"1ac1debf-d20c-4a3b-abec-64c6e317cb69\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x56jp" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.122395 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-v286d\" (UID: \"66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-v286d" Jan 30 
11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.128881 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvzdx\" (UniqueName: \"kubernetes.io/projected/2ccbee61-4f6b-4070-ae06-071f1062ad2f-kube-api-access-wvzdx\") pod \"olm-operator-6b444d44fb-22qwf\" (UID: \"2ccbee61-4f6b-4070-ae06-071f1062ad2f\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.129388 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfhnp\" (UniqueName: \"kubernetes.io/projected/db60e754-3b34-4d6d-8d4c-46384bc04d60-kube-api-access-dfhnp\") pod \"router-default-5444994796-qzs78\" (UID: \"db60e754-3b34-4d6d-8d4c-46384bc04d60\") " pod="openshift-ingress/router-default-5444994796-qzs78" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.133847 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x56jp" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.133940 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsh89\" (UniqueName: \"kubernetes.io/projected/b4ff5527-1a84-4f96-a063-b74f0e220eb9-kube-api-access-jsh89\") pod \"dns-default-zlb62\" (UID: \"b4ff5527-1a84-4f96-a063-b74f0e220eb9\") " pod="openshift-dns/dns-default-zlb62" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.134514 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97rtk\" (UniqueName: \"kubernetes.io/projected/3164cd87-2399-4b8f-8314-f663f2fc0b52-kube-api-access-97rtk\") pod \"package-server-manager-789f6589d5-4xxv7\" (UID: \"3164cd87-2399-4b8f-8314-f663f2fc0b52\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4xxv7" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.137271 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9b9sr\" (UniqueName: \"kubernetes.io/projected/575b58cd-4c7e-4d84-bf3e-182ebe6232e8-kube-api-access-9b9sr\") pod \"migrator-59844c95c7-b8qs4\" (UID: \"575b58cd-4c7e-4d84-bf3e-182ebe6232e8\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-b8qs4" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.137476 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zzvln\" (UniqueName: \"kubernetes.io/projected/aa4aaaa5-396e-4e62-92a3-74b835af58a7-kube-api-access-zzvln\") pod \"console-f9d7485db-mxjx6\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") " pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.138267 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cbbtl\" (UniqueName: \"kubernetes.io/projected/c271501b-03fc-4c15-b1d7-e705cbcf16eb-kube-api-access-cbbtl\") pod \"multus-admission-controller-857f4d67dd-xmjbx\" (UID: \"c271501b-03fc-4c15-b1d7-e705cbcf16eb\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-xmjbx" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.138969 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2rhr\" (UniqueName: \"kubernetes.io/projected/8a7284fa-8f0a-461c-9753-4636c4f6e3f0-kube-api-access-l2rhr\") pod \"service-ca-9c57cc56f-9pqdt\" (UID: \"8a7284fa-8f0a-461c-9753-4636c4f6e3f0\") " pod="openshift-service-ca/service-ca-9c57cc56f-9pqdt" Jan 30 11:58:48 crc 
kubenswrapper[4703]: I0130 11:58:48.141664 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8f5m\" (UniqueName: \"kubernetes.io/projected/0c06264a-b3b5-4784-9ed9-05fb2c937105-kube-api-access-r8f5m\") pod \"ingress-operator-5b745b69d9-8tl5d\" (UID: \"0c06264a-b3b5-4784-9ed9-05fb2c937105\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.142374 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4xxv7" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.148782 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dq4rv\" (UniqueName: \"kubernetes.io/projected/bb27e0bb-49da-4e6c-a7f7-e8addc64b009-kube-api-access-dq4rv\") pod \"service-ca-operator-777779d784-v7hlb\" (UID: \"bb27e0bb-49da-4e6c-a7f7-e8addc64b009\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-v7hlb" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.149751 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d677h\" (UniqueName: \"kubernetes.io/projected/146be6e7-a4cb-40c5-bda3-1e9b3c757118-kube-api-access-d677h\") pod \"machine-config-server-n5kz2\" (UID: \"146be6e7-a4cb-40c5-bda3-1e9b3c757118\") " pod="openshift-machine-config-operator/machine-config-server-n5kz2" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.152395 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0c06264a-b3b5-4784-9ed9-05fb2c937105-bound-sa-token\") pod \"ingress-operator-5b745b69d9-8tl5d\" (UID: \"0c06264a-b3b5-4784-9ed9-05fb2c937105\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.154888 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t429q\" (UniqueName: \"kubernetes.io/projected/194f460d-e3a1-409f-b6ca-7338f62025c0-kube-api-access-t429q\") pod \"catalog-operator-68c6474976-l4tvp\" (UID: \"194f460d-e3a1-409f-b6ca-7338f62025c0\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.155506 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhth4\" (UniqueName: \"kubernetes.io/projected/56158b04-1a02-453d-b48a-a107343a3955-kube-api-access-xhth4\") pod \"oauth-openshift-558db77b4-d4tv6\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.158238 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:48 crc kubenswrapper[4703]: E0130 11:58:48.158596 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:48.658583283 +0000 UTC m=+164.436404937 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.159904 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45c9n\" (UniqueName: \"kubernetes.io/projected/e0ce7c98-966a-4d09-b401-f6cb42c1b08e-kube-api-access-45c9n\") pod \"dns-operator-744455d44c-jn8hp\" (UID: \"e0ce7c98-966a-4d09-b401-f6cb42c1b08e\") " pod="openshift-dns-operator/dns-operator-744455d44c-jn8hp" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.168873 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-9pqdt" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.180367 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b887r\" (UniqueName: \"kubernetes.io/projected/40e464c0-96c5-4c69-8537-ef29b93319ab-kube-api-access-b887r\") pod \"control-plane-machine-set-operator-78cbb6b69f-92h6w\" (UID: \"40e464c0-96c5-4c69-8537-ef29b93319ab\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-92h6w" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.186269 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.198604 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.206673 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.208787 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-v7hlb" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.233927 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jkxg\" (UniqueName: \"kubernetes.io/projected/8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba-kube-api-access-6jkxg\") pod \"packageserver-d55dfcdfc-9lxbz\" (UID: \"8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.242932 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcjr8\" (UniqueName: \"kubernetes.io/projected/26730f5b-96f6-40f6-ab66-0500a306f988-kube-api-access-rcjr8\") pod \"marketplace-operator-79b997595-wd7g9\" (UID: \"26730f5b-96f6-40f6-ab66-0500a306f988\") " pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.265678 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-5zq9g" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.267714 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbspk\" (UniqueName: \"kubernetes.io/projected/897e6904-d07b-468a-9eff-504d8ce80db1-kube-api-access-sbspk\") pod \"etcd-operator-b45778765-vt7hb\" (UID: \"897e6904-d07b-468a-9eff-504d8ce80db1\") " pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.268305 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-zlb62" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.269772 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:48 crc kubenswrapper[4703]: E0130 11:58:48.270051 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:48.770039698 +0000 UTC m=+164.547861352 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.272389 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-n5kz2" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.290203 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92rr9\" (UniqueName: \"kubernetes.io/projected/2eaf3e49-2394-495c-926e-5504ff81ccc5-kube-api-access-92rr9\") pod \"collect-profiles-29496225-zh786\" (UID: \"2eaf3e49-2394-495c-926e-5504ff81ccc5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.297705 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4hj9\" (UniqueName: \"kubernetes.io/projected/de735d2c-8817-415e-b190-df96bf922407-kube-api-access-w4hj9\") pod \"csi-hostpathplugin-wsjdp\" (UID: \"de735d2c-8817-415e-b190-df96bf922407\") " pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.332599 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.358920 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.359675 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.363715 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-sn2fc" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.369474 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pvx9n" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.370359 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:48 crc kubenswrapper[4703]: E0130 11:58:48.370917 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:48.870890023 +0000 UTC m=+164.648711677 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.530890 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:48 crc kubenswrapper[4703]: E0130 11:58:48.531529 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:49.031507036 +0000 UTC m=+164.809328770 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.535063 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.535731 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-b8qs4" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.536828 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-qzs78" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.536947 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.537461 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-xmjbx" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.537816 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-jn8hp" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.537997 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.537858 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-v286d" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.538812 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.542851 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.545485 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.546194 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/47544af6-cdac-444e-9c16-37ded5e11e28-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-sxv85\" (UID: \"47544af6-cdac-444e-9c16-37ded5e11e28\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxv85" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.555358 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-92h6w" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.633319 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:48 crc kubenswrapper[4703]: E0130 11:58:48.633468 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:49.133433891 +0000 UTC m=+164.911255545 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.633833 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:48 crc kubenswrapper[4703]: E0130 11:58:48.634379 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:49.134360147 +0000 UTC m=+164.912181801 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.670924 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-q7gv4"] Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.725453 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxv85" Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.735492 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:48 crc kubenswrapper[4703]: E0130 11:58:48.735955 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:49.235927952 +0000 UTC m=+165.013749606 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.736053 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:48 crc kubenswrapper[4703]: E0130 11:58:48.737243 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:49.237225039 +0000 UTC m=+165.015046703 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.836884 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:48 crc kubenswrapper[4703]: E0130 11:58:48.841319 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:49.341293375 +0000 UTC m=+165.119115029 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:48 crc kubenswrapper[4703]: I0130 11:58:48.872985 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:48 crc kubenswrapper[4703]: E0130 11:58:48.873636 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:49.373611623 +0000 UTC m=+165.151433277 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.000847 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:49 crc kubenswrapper[4703]: E0130 11:58:49.001068 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:49.501040763 +0000 UTC m=+165.278862427 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.001623 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:49 crc kubenswrapper[4703]: E0130 11:58:49.002003 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:49.50198866 +0000 UTC m=+165.279810314 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.173942 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:49 crc kubenswrapper[4703]: E0130 11:58:49.175375 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:49.675340054 +0000 UTC m=+165.453161708 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.181991 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:49 crc kubenswrapper[4703]: E0130 11:58:49.182478 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:49.682456986 +0000 UTC m=+165.460278640 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.282887 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:49 crc kubenswrapper[4703]: E0130 11:58:49.283069 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:49.783034093 +0000 UTC m=+165.560855757 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.283275 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:49 crc kubenswrapper[4703]: E0130 11:58:49.283574 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:49.783566918 +0000 UTC m=+165.561388572 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.519474 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:49 crc kubenswrapper[4703]: E0130 11:58:49.519993 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:50.019976473 +0000 UTC m=+165.797798127 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.642374 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:49 crc kubenswrapper[4703]: E0130 11:58:49.642789 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:50.142774582 +0000 UTC m=+165.920596236 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.792189 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:49 crc kubenswrapper[4703]: E0130 11:58:49.792442 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:50.292412691 +0000 UTC m=+166.070234345 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.792494 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:49 crc kubenswrapper[4703]: E0130 11:58:49.792808 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:50.292796463 +0000 UTC m=+166.070618117 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.836166 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-n5kz2" event={"ID":"146be6e7-a4cb-40c5-bda3-1e9b3c757118","Type":"ContainerStarted","Data":"be2488738d26ac3600c7cca6938a5e146ff67a083f214a1758b943923446c619"} Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.842980 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj"] Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.843260 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t" event={"ID":"68f47bdd-b01b-467d-914f-ef08276b4cc5","Type":"ContainerStarted","Data":"5fa90a605d2fdf032a9abeedc5a9b29ce0c5f7545d0e314cd8b7e3f989e5e2ce"} Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.847152 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-q7gv4" event={"ID":"1441806f-c27a-4a93-82e3-123caba174c5","Type":"ContainerStarted","Data":"6d18e4a28bad0154a954e3fe57bf9994ddd87140fafde24ea995349f5797bdd0"} Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.858504 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t"] Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.897494 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8"] Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.898448 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:49 crc kubenswrapper[4703]: E0130 11:58:49.898869 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:50.398852234 +0000 UTC m=+166.176673898 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.901226 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-x5w7b"] Jan 30 11:58:49 crc kubenswrapper[4703]: W0130 11:58:49.908405 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf598eb59_e841_4a92_bd81_926bb698c44e.slice/crio-0d141d3c4cb745c03db06cc7e72408a58c3105c7f69cc1f70229c530594c7e52 WatchSource:0}: Error finding container 0d141d3c4cb745c03db06cc7e72408a58c3105c7f69cc1f70229c530594c7e52: Status 404 returned error can't find the container with id 0d141d3c4cb745c03db06cc7e72408a58c3105c7f69cc1f70229c530594c7e52 Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.908509 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789"] Jan 30 11:58:49 crc kubenswrapper[4703]: I0130 11:58:49.917744 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-dv4qg"] Jan 30 11:58:50 crc kubenswrapper[4703]: I0130 11:58:50.001454 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:50 crc kubenswrapper[4703]: E0130 11:58:50.001843 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:50.50183103 +0000 UTC m=+166.279652684 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:50 crc kubenswrapper[4703]: I0130 11:58:50.114992 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:50 crc kubenswrapper[4703]: E0130 11:58:50.115787 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:50.615767966 +0000 UTC m=+166.393589620 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:50 crc kubenswrapper[4703]: I0130 11:58:50.220225 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:50 crc kubenswrapper[4703]: E0130 11:58:50.220604 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:50.720592664 +0000 UTC m=+166.498414318 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:50 crc kubenswrapper[4703]: I0130 11:58:50.370544 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:50 crc kubenswrapper[4703]: E0130 11:58:50.371079 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:50.871054618 +0000 UTC m=+166.648876272 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:50 crc kubenswrapper[4703]: I0130 11:58:50.509821 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:50 crc kubenswrapper[4703]: E0130 11:58:50.510201 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:51.01018861 +0000 UTC m=+166.788010264 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:50 crc kubenswrapper[4703]: I0130 11:58:50.610849 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:50 crc kubenswrapper[4703]: E0130 11:58:50.611032 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:51.111003774 +0000 UTC m=+166.888825428 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:50 crc kubenswrapper[4703]: I0130 11:58:50.611170 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:50 crc kubenswrapper[4703]: E0130 11:58:50.611619 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:51.11160054 +0000 UTC m=+166.889422194 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:50 crc kubenswrapper[4703]: I0130 11:58:50.712486 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:50 crc kubenswrapper[4703]: E0130 11:58:50.712878 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:51.212857827 +0000 UTC m=+166.990679481 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:50 crc kubenswrapper[4703]: I0130 11:58:50.814097 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:50 crc kubenswrapper[4703]: E0130 11:58:50.814542 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:51.314525594 +0000 UTC m=+167.092347248 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.081493 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:51 crc kubenswrapper[4703]: E0130 11:58:51.082184 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:51.582169638 +0000 UTC m=+167.359991292 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.202255 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:51 crc kubenswrapper[4703]: E0130 11:58:51.202567 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:51.702555587 +0000 UTC m=+167.480377241 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.248111 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" event={"ID":"3e848933-041a-4f18-89af-8f369b7eebcc","Type":"ContainerStarted","Data":"cd3dabf3d2acb56ab24c928b99f754fd0f4512498c6987aee9920a4edf3a1f59"} Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.248167 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" event={"ID":"3e848933-041a-4f18-89af-8f369b7eebcc","Type":"ContainerStarted","Data":"c2ffca71ca103ef4fcd3194fd388a4008ae9c4a510b9f35b7fb41480d0019d77"} Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.249544 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.250824 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" event={"ID":"034ae162-4298-4f36-8b7a-eaf5a9fe70ca","Type":"ContainerStarted","Data":"d22de511f561a344d6021239c96163ad3d7a443dab0c831bb3d461b003cb55bf"} Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.250840 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" event={"ID":"034ae162-4298-4f36-8b7a-eaf5a9fe70ca","Type":"ContainerStarted","Data":"b26d681b258ea563d656eb3373f447a662196316ff0793594ba7a56ce826d82f"} Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.251386 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.252654 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" event={"ID":"66c74144-f023-4e32-a378-d61b9cb574a5","Type":"ContainerStarted","Data":"d83c41025460f32645f0edc63f6b27ff3f2e526e18ec10b63c621369c7ffca6b"} Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.252680 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" event={"ID":"66c74144-f023-4e32-a378-d61b9cb574a5","Type":"ContainerStarted","Data":"7f7a0e1cb200747dc6ce9376d3e3d54bdf79f0848f9ac079c6a3614326b63974"} Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.337264 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:51 crc kubenswrapper[4703]: E0130 11:58:51.337364 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:51.837343486 +0000 UTC m=+167.615165140 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.338195 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:51 crc kubenswrapper[4703]: E0130 11:58:51.341564 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:51.841545096 +0000 UTC m=+167.619366850 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.365507 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t" event={"ID":"68f47bdd-b01b-467d-914f-ef08276b4cc5","Type":"ContainerStarted","Data":"1554a4c75e22235135f5447e6024320a57d2df71eb9d8d0ed2cd9cd34122d06f"} Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.389831 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-q7gv4" event={"ID":"1441806f-c27a-4a93-82e3-123caba174c5","Type":"ContainerStarted","Data":"84c44c7e0ca622cf587ac57b54dde29fd5b63a01d41da8a617c675860133ca6e"} Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.390161 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-q7gv4" Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.391768 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-qzs78" event={"ID":"db60e754-3b34-4d6d-8d4c-46384bc04d60","Type":"ContainerStarted","Data":"33cf75c513e4e1fc83c0b3bf1737af769652c771aa2f04c7d361bc2760a417d4"} Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.391810 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-qzs78" event={"ID":"db60e754-3b34-4d6d-8d4c-46384bc04d60","Type":"ContainerStarted","Data":"40248b701128c05078384d267a5c2292d55e28a3340662891bc283ddf8a9649d"} Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.415731 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-api/machine-api-operator-5694c8668f-x5w7b" event={"ID":"8ff6e057-d092-41f8-908a-7f718f8e7813","Type":"ContainerStarted","Data":"639bb901c8f560508b2bef1c7c6857442196425355a77f5d0ab62ca81e649ccf"} Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.416028 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-x5w7b" event={"ID":"8ff6e057-d092-41f8-908a-7f718f8e7813","Type":"ContainerStarted","Data":"99d087b4a240ea3f5e09d83038692740708f89c656879035725852db2b8ef4b0"} Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.418737 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t" event={"ID":"f598eb59-e841-4a92-bd81-926bb698c44e","Type":"ContainerStarted","Data":"8a8181822df2d3ffd0d9840b67c92b11074fafd12c71ed64191bcadfc7636f91"} Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.418916 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t" event={"ID":"f598eb59-e841-4a92-bd81-926bb698c44e","Type":"ContainerStarted","Data":"0d141d3c4cb745c03db06cc7e72408a58c3105c7f69cc1f70229c530594c7e52"} Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.421309 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-n5kz2" event={"ID":"146be6e7-a4cb-40c5-bda3-1e9b3c757118","Type":"ContainerStarted","Data":"7fce57ae268c25b628a5a3560d26a9da521d94756bde26a115cef45288eef71d"} Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.423181 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" podStartSLOduration=139.423098931 podStartE2EDuration="2m19.423098931s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:51.389581789 +0000 UTC m=+167.167403443" watchObservedRunningTime="2026-01-30 11:58:51.423098931 +0000 UTC m=+167.200920585" Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.423265 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj" event={"ID":"988a730f-bd1f-45b8-97a9-c14bea7d749e","Type":"ContainerStarted","Data":"0b0a33144cfc2a594dcbe4c9016336492c02e47f25cc86398f7b35a84bd22ddd"} Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.423313 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj" event={"ID":"988a730f-bd1f-45b8-97a9-c14bea7d749e","Type":"ContainerStarted","Data":"040dd217564fa311aefbcebb0b9cd8c705a90693148214b8d4ec6fcd641ee575"} Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.439620 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:51 crc kubenswrapper[4703]: E0130 11:58:51.439822 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b 
nodeName:}" failed. No retries permitted until 2026-01-30 11:58:51.939786475 +0000 UTC m=+167.717608129 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.440707 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:51 crc kubenswrapper[4703]: E0130 11:58:51.441387 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:51.94137142 +0000 UTC m=+167.719193074 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.538838 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-qzs78" Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.545458 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:51 crc kubenswrapper[4703]: E0130 11:58:51.546986 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:52.04697101 +0000 UTC m=+167.824792664 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.572876 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-8dk4t" podStartSLOduration=140.572853895 podStartE2EDuration="2m20.572853895s" podCreationTimestamp="2026-01-30 11:56:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:51.571965669 +0000 UTC m=+167.349787333" watchObservedRunningTime="2026-01-30 11:58:51.572853895 +0000 UTC m=+167.350675549" Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.594483 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" podStartSLOduration=139.594464738 podStartE2EDuration="2m19.594464738s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:51.590750754 +0000 UTC m=+167.368572408" watchObservedRunningTime="2026-01-30 11:58:51.594464738 +0000 UTC m=+167.372286392" Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.620867 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.620926 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.620873 4703 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-x28r8 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.621226 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" podUID="3e848933-041a-4f18-89af-8f369b7eebcc" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.620874 4703 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-dv4qg container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 
11:58:51.621261 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" podUID="034ae162-4298-4f36-8b7a-eaf5a9fe70ca" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.647384 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:51 crc kubenswrapper[4703]: E0130 11:58:51.648291 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:52.148278778 +0000 UTC m=+167.926100432 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.673249 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-qzs78" podStartSLOduration=139.673232247 podStartE2EDuration="2m19.673232247s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:51.672901117 +0000 UTC m=+167.450722771" watchObservedRunningTime="2026-01-30 11:58:51.673232247 +0000 UTC m=+167.451053901" Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.676667 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.676696 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.750384 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:51 crc kubenswrapper[4703]: E0130 11:58:51.750519 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:52.250481771 +0000 UTC m=+168.028303425 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.750682 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:51 crc kubenswrapper[4703]: E0130 11:58:51.751023 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:52.251013606 +0000 UTC m=+168.028835260 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.839732 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-q7gv4" podStartSLOduration=139.839712306 podStartE2EDuration="2m19.839712306s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:51.838745098 +0000 UTC m=+167.616566752" watchObservedRunningTime="2026-01-30 11:58:51.839712306 +0000 UTC m=+167.617533960" Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.842449 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-6969t" podStartSLOduration=139.842425833 podStartE2EDuration="2m19.842425833s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:51.806619495 +0000 UTC m=+167.584441159" watchObservedRunningTime="2026-01-30 11:58:51.842425833 +0000 UTC m=+167.620247487" Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.851324 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:51 crc kubenswrapper[4703]: E0130 11:58:51.851725 4703 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:52.351703706 +0000 UTC m=+168.129525360 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.867338 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-n5kz2" podStartSLOduration=6.867316139 podStartE2EDuration="6.867316139s" podCreationTimestamp="2026-01-30 11:58:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:51.865192889 +0000 UTC m=+167.643014563" watchObservedRunningTime="2026-01-30 11:58:51.867316139 +0000 UTC m=+167.645137793" Jan 30 11:58:51 crc kubenswrapper[4703]: I0130 11:58:51.953087 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:51 crc kubenswrapper[4703]: E0130 11:58:51.953477 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:52.453461476 +0000 UTC m=+168.231283130 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.054764 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:52 crc kubenswrapper[4703]: E0130 11:58:52.055261 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:52.555239048 +0000 UTC m=+168.333060702 (durationBeforeRetry 500ms). 
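A note on the timestamp format, since it appears in every retry line: the m=+167.824792664 suffix is Go's monotonic clock reading, which time.Time values carry alongside wall-clock time and which String() prints when present; because the monotonic zero is process start, it doubles as seconds since the kubelet started. A small sketch, assuming nothing beyond the standard library:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        start := time.Now()                // carries a monotonic reading
        time.Sleep(500 * time.Millisecond) // stand-in for durationBeforeRetry
        retryAt := time.Now()
        // A time.Time that still holds its monotonic reading prints an
        // "m=+<seconds since process start>" suffix, the format seen here.
        fmt.Println(retryAt)
        fmt.Println("durationBeforeRetry:", retryAt.Sub(start)) // ~500ms, measured monotonically
    }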
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.107427 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-zlb62"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.129571 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bth9p"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.134699 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zwbps"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.135018 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-x56jp"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.162305 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:52 crc kubenswrapper[4703]: E0130 11:58:52.162698 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:52.662684949 +0000 UTC m=+168.440506603 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.264533 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:52 crc kubenswrapper[4703]: E0130 11:58:52.264919 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:52.764904643 +0000 UTC m=+168.542726297 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.422724 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:52 crc kubenswrapper[4703]: E0130 11:58:52.423229 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:52.923217596 +0000 UTC m=+168.701039240 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.470283 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-v7hlb"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.476569 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.504604 4703 generic.go:334] "Generic (PLEG): container finished" podID="988a730f-bd1f-45b8-97a9-c14bea7d749e" containerID="0b0a33144cfc2a594dcbe4c9016336492c02e47f25cc86398f7b35a84bd22ddd" exitCode=0 Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.504676 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj" event={"ID":"988a730f-bd1f-45b8-97a9-c14bea7d749e","Type":"ContainerDied","Data":"0b0a33144cfc2a594dcbe4c9016336492c02e47f25cc86398f7b35a84bd22ddd"} Jan 30 11:58:52 crc kubenswrapper[4703]: W0130 11:58:52.505174 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2ccbee61_4f6b_4070_ae06_071f1062ad2f.slice/crio-ae45380d5dc8583fe62b6822baf48b5534ee8053e3244298e6a619e4c73f11b7 WatchSource:0}: Error finding container ae45380d5dc8583fe62b6822baf48b5534ee8053e3244298e6a619e4c73f11b7: Status 404 returned error can't find the container with id ae45380d5dc8583fe62b6822baf48b5534ee8053e3244298e6a619e4c73f11b7 Jan 30 11:58:52 crc kubenswrapper[4703]: W0130 11:58:52.507217 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbb27e0bb_49da_4e6c_a7f7_e8addc64b009.slice/crio-841443bdcec5f7a44b05becc5ab4af2a1a5172b9a54307eb6e342c88a0d312c8 
WatchSource:0}: Error finding container 841443bdcec5f7a44b05becc5ab4af2a1a5172b9a54307eb6e342c88a0d312c8: Status 404 returned error can't find the container with id 841443bdcec5f7a44b05becc5ab4af2a1a5172b9a54307eb6e342c88a0d312c8 Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.510178 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pvx9n"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.516412 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bth9p" event={"ID":"5c74cde9-f16e-4ee1-a881-868f6d3b9865","Type":"ContainerStarted","Data":"012843e34e1dfb7728e4dbe909600d5e56edce71daea220b16201981bf5edb1c"} Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.523812 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:52 crc kubenswrapper[4703]: E0130 11:58:52.524929 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:53.024908208 +0000 UTC m=+168.802729862 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.524965 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-zlb62" event={"ID":"b4ff5527-1a84-4f96-a063-b74f0e220eb9","Type":"ContainerStarted","Data":"22dbfb61544d867e680c4683129e7842416653344c7b568c2c69b6f77380037c"} Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.534466 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x56jp" event={"ID":"1ac1debf-d20c-4a3b-abec-64c6e317cb69","Type":"ContainerStarted","Data":"2d6e834ef173eca21ce176a84173c1b3a53375a5b1f70196d0c6c4c1cdc990c0"} Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.534519 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-xfvl5"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.537327 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qwk4"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.546492 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.546561 4703 prober.go:107] "Probe failed" 
probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.546745 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4xxv7"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.554163 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.555800 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-9pqdt"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.560053 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-jn8hp"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.563174 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-wsjdp"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.569424 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4c5n9"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.572439 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-x5w7b" event={"ID":"8ff6e057-d092-41f8-908a-7f718f8e7813","Type":"ContainerStarted","Data":"b1c319c87f79d8d24fb9a007b0a85f3cea9392b2fbcf512a8e372a734ab65498"} Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.575406 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-zwbps" event={"ID":"64f5798c-a6f3-4a7e-9b16-f3039aab5a23","Type":"ContainerStarted","Data":"88c6f627ec34837bf571fbe048941d9ebade6187ce7e2a99a65556f729fccce6"} Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.578369 4703 generic.go:334] "Generic (PLEG): container finished" podID="66c74144-f023-4e32-a378-d61b9cb574a5" containerID="d83c41025460f32645f0edc63f6b27ff3f2e526e18ec10b63c621369c7ffca6b" exitCode=0 Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.579350 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" event={"ID":"66c74144-f023-4e32-a378-d61b9cb574a5","Type":"ContainerDied","Data":"d83c41025460f32645f0edc63f6b27ff3f2e526e18ec10b63c621369c7ffca6b"} Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.579395 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" event={"ID":"66c74144-f023-4e32-a378-d61b9cb574a5","Type":"ContainerStarted","Data":"70a08f293dba53c89ba83cebaacd6629feb68f5bf8676daf03649de7d8fad1af"} Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.580459 4703 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-x28r8 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" start-of-body= Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.580511 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" 
podUID="3e848933-041a-4f18-89af-8f369b7eebcc" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": dial tcp 10.217.0.8:8443: connect: connection refused" Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.581182 4703 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-dv4qg container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.581219 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" podUID="034ae162-4298-4f36-8b7a-eaf5a9fe70ca" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Jan 30 11:58:52 crc kubenswrapper[4703]: W0130 11:58:52.581307 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod115e8738_ea93_49f0_a85b_9c59933c940c.slice/crio-39aebf1c11872a1e68f7c733c6e41403b251634edcd734ce8d17231e39a9d293 WatchSource:0}: Error finding container 39aebf1c11872a1e68f7c733c6e41403b251634edcd734ce8d17231e39a9d293: Status 404 returned error can't find the container with id 39aebf1c11872a1e68f7c733c6e41403b251634edcd734ce8d17231e39a9d293 Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.582023 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.582052 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.604984 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-x5w7b" podStartSLOduration=140.604969332 podStartE2EDuration="2m20.604969332s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:52.598930641 +0000 UTC m=+168.376752295" watchObservedRunningTime="2026-01-30 11:58:52.604969332 +0000 UTC m=+168.382790986" Jan 30 11:58:52 crc kubenswrapper[4703]: W0130 11:58:52.623546 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3164cd87_2399_4b8f_8314_f663f2fc0b52.slice/crio-bfa2b31780029ddaa999bcdc3ac2482d22562bbefa49ad6b76d438d68130deba WatchSource:0}: Error finding container bfa2b31780029ddaa999bcdc3ac2482d22562bbefa49ad6b76d438d68130deba: Status 404 returned error can't find the container with id bfa2b31780029ddaa999bcdc3ac2482d22562bbefa49ad6b76d438d68130deba Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.626901 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:52 crc kubenswrapper[4703]: E0130 11:58:52.627324 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:53.127309367 +0000 UTC m=+168.905131021 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.629093 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" podStartSLOduration=140.629078097 podStartE2EDuration="2m20.629078097s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:52.626144654 +0000 UTC m=+168.403966308" watchObservedRunningTime="2026-01-30 11:58:52.629078097 +0000 UTC m=+168.406899751" Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.708450 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-vt7hb"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.710765 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wd7g9"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.722100 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-xmjbx"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.728233 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:52 crc kubenswrapper[4703]: E0130 11:58:52.735758 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:53.235702086 +0000 UTC m=+169.013523740 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.738495 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-v286d"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.950830 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:52 crc kubenswrapper[4703]: E0130 11:58:52.951247 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:53.451231167 +0000 UTC m=+169.229052821 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.956262 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-sn2fc"] Jan 30 11:58:52 crc kubenswrapper[4703]: I0130 11:58:52.975628 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxv85"] Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.000058 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786"] Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.021636 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d"] Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.034915 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-b8qs4"] Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.045224 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-mxjx6"] Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.046773 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-5zq9g"] Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.053962 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:53 crc kubenswrapper[4703]: E0130 11:58:53.054365 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:53.554341287 +0000 UTC m=+169.332162951 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.056478 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v"] Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.060275 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-92h6w"] Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.063392 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-gkllf"] Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.072181 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz"] Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.076342 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-d4tv6"] Jan 30 11:58:53 crc kubenswrapper[4703]: W0130 11:58:53.090635 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod66ebbbe6_076c_4d9b_a2e2_5aea0188b8bb.slice/crio-7d061c6ec88f6f465ad95ac96feb6230d8db53925f3ecdfe39805accc9e60cff WatchSource:0}: Error finding container 7d061c6ec88f6f465ad95ac96feb6230d8db53925f3ecdfe39805accc9e60cff: Status 404 returned error can't find the container with id 7d061c6ec88f6f465ad95ac96feb6230d8db53925f3ecdfe39805accc9e60cff Jan 30 11:58:53 crc kubenswrapper[4703]: W0130 11:58:53.097527 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod56158b04_1a02_453d_b48a_a107343a3955.slice/crio-7cfda9553d79f006714959aeb9659721ce71f56eb4acbbfb61560207b16635d7 WatchSource:0}: Error finding container 7cfda9553d79f006714959aeb9659721ce71f56eb4acbbfb61560207b16635d7: Status 404 returned error can't find the container with id 7cfda9553d79f006714959aeb9659721ce71f56eb4acbbfb61560207b16635d7 Jan 30 11:58:53 crc kubenswrapper[4703]: W0130 11:58:53.107509 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod33681525_5de5_4f9e_a774_809bd0603c5a.slice/crio-7837dd56efc6cf8b660ba81a815a88c1c51a05a862376dcbd17dfcbf8046c508 WatchSource:0}: Error finding container 7837dd56efc6cf8b660ba81a815a88c1c51a05a862376dcbd17dfcbf8046c508: Status 404 returned error can't find the container with id 
7837dd56efc6cf8b660ba81a815a88c1c51a05a862376dcbd17dfcbf8046c508 Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.162597 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:53 crc kubenswrapper[4703]: E0130 11:58:53.162972 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:53.662960563 +0000 UTC m=+169.440782217 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:53 crc kubenswrapper[4703]: W0130 11:58:53.164073 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaa4aaaa5_396e_4e62_92a3_74b835af58a7.slice/crio-70dde461d8ef0383724a92d817faba63e7464a6eeb43fe4193addee302ed5001 WatchSource:0}: Error finding container 70dde461d8ef0383724a92d817faba63e7464a6eeb43fe4193addee302ed5001: Status 404 returned error can't find the container with id 70dde461d8ef0383724a92d817faba63e7464a6eeb43fe4193addee302ed5001 Jan 30 11:58:53 crc kubenswrapper[4703]: W0130 11:58:53.164480 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod575b58cd_4c7e_4d84_bf3e_182ebe6232e8.slice/crio-d4316fb4f940256cca836a982fc6f66e3b673e0f006ac684c271ca8a7dff7ba8 WatchSource:0}: Error finding container d4316fb4f940256cca836a982fc6f66e3b673e0f006ac684c271ca8a7dff7ba8: Status 404 returned error can't find the container with id d4316fb4f940256cca836a982fc6f66e3b673e0f006ac684c271ca8a7dff7ba8 Jan 30 11:58:53 crc kubenswrapper[4703]: W0130 11:58:53.164669 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0c06264a_b3b5_4784_9ed9_05fb2c937105.slice/crio-8be5eda16aa58b1822fbf7f317d7a74270ad1f299e93aad013a09292bb8fc21a WatchSource:0}: Error finding container 8be5eda16aa58b1822fbf7f317d7a74270ad1f299e93aad013a09292bb8fc21a: Status 404 returned error can't find the container with id 8be5eda16aa58b1822fbf7f317d7a74270ad1f299e93aad013a09292bb8fc21a Jan 30 11:58:53 crc kubenswrapper[4703]: W0130 11:58:53.166411 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod61fedc4c_7586_4cdc_b38e_be8a6f3762a1.slice/crio-040d0b874db5aa30a27f75e51055b4f0da904cd99ac35ef57d97fa4a8e238d44 WatchSource:0}: Error finding container 040d0b874db5aa30a27f75e51055b4f0da904cd99ac35ef57d97fa4a8e238d44: Status 404 returned error can't find the container with id 040d0b874db5aa30a27f75e51055b4f0da904cd99ac35ef57d97fa4a8e238d44 Jan 30 11:58:53 crc kubenswrapper[4703]: W0130 11:58:53.179277 
4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8879bcc7_3fe8_4982_9d10_18c8c4e9a8ba.slice/crio-f808313f1c7477167985d582e65c720cb9b8135d33165e70970139b844f4ba26 WatchSource:0}: Error finding container f808313f1c7477167985d582e65c720cb9b8135d33165e70970139b844f4ba26: Status 404 returned error can't find the container with id f808313f1c7477167985d582e65c720cb9b8135d33165e70970139b844f4ba26 Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.430535 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:53 crc kubenswrapper[4703]: E0130 11:58:53.430925 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:53.930909303 +0000 UTC m=+169.708730957 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.520520 4703 csr.go:261] certificate signing request csr-m9plt is approved, waiting to be issued Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.528687 4703 csr.go:257] certificate signing request csr-m9plt is issued Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.531926 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:53 crc kubenswrapper[4703]: E0130 11:58:53.532802 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:54.032767457 +0000 UTC m=+169.810589101 (durationBeforeRetry 500ms). 
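Between the volume retries, csr.go records the two stages of the kubelet's client-certificate bootstrap: csr-m9plt is first approved (an Approved condition added to its status) and, milliseconds later, issued (the signer populated status.certificate). A hedged client-go sketch of how those two states are distinguished; the kubeconfig path and error handling are illustrative, not taken from this cluster:

    package main

    import (
        "context"
        "fmt"

        certificatesv1 "k8s.io/api/certificates/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
        if err != nil {
            panic(err)
        }
        cs := kubernetes.NewForConfigOrDie(cfg)

        csr, err := cs.CertificatesV1().CertificateSigningRequests().
            Get(context.TODO(), "csr-m9plt", metav1.GetOptions{})
        if err != nil {
            panic(err)
        }
        approved := false
        for _, c := range csr.Status.Conditions {
            if c.Type == certificatesv1.CertificateApproved {
                approved = true
            }
        }
        issued := len(csr.Status.Certificate) > 0
        // "approved, waiting to be issued" is approved && !issued; the second
        // log line appears once the signer fills in the certificate bytes.
        fmt.Printf("approved=%v issued=%v\n", approved, issued)
    }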
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.546313 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 11:58:53 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld Jan 30 11:58:53 crc kubenswrapper[4703]: [+]process-running ok Jan 30 11:58:53 crc kubenswrapper[4703]: healthz check failed Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.546380 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.773470 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:53 crc kubenswrapper[4703]: E0130 11:58:53.773599 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:54.273544996 +0000 UTC m=+170.051366650 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.773713 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:53 crc kubenswrapper[4703]: E0130 11:58:53.774138 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:54.274100431 +0000 UTC m=+170.051922085 (durationBeforeRetry 500ms). 
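The router's startup probe failure just above shows the aggregated healthz format: one [+]/[-] line per registered check ("reason withheld" because verbose failure output is suppressed), and any failing check turns the endpoint into an HTTP 500, which the prober then reports as "HTTP probe failed with statuscode: 500". A minimal net/http sketch of that response shape; the check wiring is hypothetical (the router's real checks are its own):

    package main

    import (
        "fmt"
        "net/http"
    )

    // healthz aggregates named checks in the k8s.io/apiserver healthz style:
    // one "[+]"/"[-]" line per check, HTTP 500 if any check fails. Note a
    // real aggregator keeps a stable check order; map iteration does not.
    func healthz(checks map[string]func() error) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            body, failed := "", false
            for name, check := range checks {
                if err := check(); err != nil {
                    failed = true
                    body += fmt.Sprintf("[-]%s failed: reason withheld\n", name)
                } else {
                    body += fmt.Sprintf("[+]%s ok\n", name)
                }
            }
            if failed {
                body += "healthz check failed\n"
                w.WriteHeader(http.StatusInternalServerError)
            }
            fmt.Fprint(w, body)
        }
    }

    func main() {
        http.HandleFunc("/healthz/ready", healthz(map[string]func() error{
            "backend-http":    func() error { return fmt.Errorf("not ready") },
            "has-synced":      func() error { return fmt.Errorf("not synced") },
            "process-running": func() error { return nil },
        }))
        http.ListenAndServe("localhost:1936", nil)
    }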
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.792708 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-b8qs4" event={"ID":"575b58cd-4c7e-4d84-bf3e-182ebe6232e8","Type":"ContainerStarted","Data":"d4316fb4f940256cca836a982fc6f66e3b673e0f006ac684c271ca8a7dff7ba8"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.794560 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" event={"ID":"897e6904-d07b-468a-9eff-504d8ce80db1","Type":"ContainerStarted","Data":"4d90c10f81d4973da34f995a4996d1dfcfa70f44428db77b54754e64bd52fc55"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.797415 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-zwbps" event={"ID":"64f5798c-a6f3-4a7e-9b16-f3039aab5a23","Type":"ContainerStarted","Data":"f05a662aa39ac246ab8684db42c926472bd2ff70b0f5f97e4d5984f36b520aa5"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.799238 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-zwbps"
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.809562 4703 patch_prober.go:28] interesting pod/console-operator-58897d9998-zwbps container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body=
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.809649 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zwbps" podUID="64f5798c-a6f3-4a7e-9b16-f3039aab5a23" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused"
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.813613 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" event={"ID":"de735d2c-8817-415e-b190-df96bf922407","Type":"ContainerStarted","Data":"2e69ea1733ad78730f8633e4c316aec74d92dfd1dae5e128552c0f882053b6b9"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.834849 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" event={"ID":"8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba","Type":"ContainerStarted","Data":"f808313f1c7477167985d582e65c720cb9b8135d33165e70970139b844f4ba26"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.836643 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-zwbps" podStartSLOduration=141.836619548 podStartE2EDuration="2m21.836619548s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:53.832171051 +0000 UTC m=+169.609992705" watchObservedRunningTime="2026-01-30 11:58:53.836619548 +0000 UTC m=+169.614441202"
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.838508 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786" event={"ID":"2eaf3e49-2394-495c-926e-5504ff81ccc5","Type":"ContainerStarted","Data":"c17e92e00793f9efbbac5acb7f9dc233c3a17a291e5259edbee12560790d2c4d"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.852482 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-jn8hp" event={"ID":"e0ce7c98-966a-4d09-b401-f6cb42c1b08e","Type":"ContainerStarted","Data":"8b4aafc50acd51ec1c0fcb0ed9f5e64cea0427200e9ddfd4e2c44a42ab2ed3f0"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.855609 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bth9p" event={"ID":"5c74cde9-f16e-4ee1-a881-868f6d3b9865","Type":"ContainerStarted","Data":"0ce40b0c486669900de949f027889684d07849857ffcb72cd8277f9d1ffcd244"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.857987 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-92h6w" event={"ID":"40e464c0-96c5-4c69-8537-ef29b93319ab","Type":"ContainerStarted","Data":"d4714bf6561b8f13c4d6f58b21eb6c735f0a49a4475cf2e2e62c40bcafe07f4c"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.859431 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-xmjbx" event={"ID":"c271501b-03fc-4c15-b1d7-e705cbcf16eb","Type":"ContainerStarted","Data":"16821631bd2607230d6537db03eac3f6cb451ddec82cd275294a7c103e49a769"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.876491 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:58:53 crc kubenswrapper[4703]: E0130 11:58:53.876763 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:54.376739177 +0000 UTC m=+170.154560831 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.877735 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:58:53 crc kubenswrapper[4703]: E0130 11:58:53.879046 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:54.379032662 +0000 UTC m=+170.156854316 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.914408 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-v286d" event={"ID":"66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb","Type":"ContainerStarted","Data":"7d061c6ec88f6f465ad95ac96feb6230d8db53925f3ecdfe39805accc9e60cff"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.917199 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" event={"ID":"26730f5b-96f6-40f6-ab66-0500a306f988","Type":"ContainerStarted","Data":"ad84e63cc0a4b9de60398d3593e6e2350d5da667e18bfd16cc613919d070fbd3"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.918590 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxv85" event={"ID":"47544af6-cdac-444e-9c16-37ded5e11e28","Type":"ContainerStarted","Data":"afe148df5bb4beca8307bd59f4574ef9f64789fef94a447b695c56f7e9f25053"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.920338 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" event={"ID":"56158b04-1a02-453d-b48a-a107343a3955","Type":"ContainerStarted","Data":"7cfda9553d79f006714959aeb9659721ce71f56eb4acbbfb61560207b16635d7"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.922522 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-zlb62" event={"ID":"b4ff5527-1a84-4f96-a063-b74f0e220eb9","Type":"ContainerStarted","Data":"efee70f6d33f1f305d0e217820fc4e3c739f6bc29fceb8fbada21269a7ee964b"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.924843 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d" event={"ID":"0c06264a-b3b5-4784-9ed9-05fb2c937105","Type":"ContainerStarted","Data":"8be5eda16aa58b1822fbf7f317d7a74270ad1f299e93aad013a09292bb8fc21a"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.926542 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v" event={"ID":"2b49de6a-eb13-487d-b779-6cad65bbbc33","Type":"ContainerStarted","Data":"5653c4664f6f1fa580a805122de2ad96cab5ccfe473977ed84e78826957bb322"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.927539 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" event={"ID":"194f460d-e3a1-409f-b6ca-7338f62025c0","Type":"ContainerStarted","Data":"4622bea2854c5848543a97133aea6f291c4f2f1cf70d16a44c85bccde9f606b0"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.930228 4703 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-l4tvp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" start-of-body=
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.930329 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" podUID="194f460d-e3a1-409f-b6ca-7338f62025c0" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused"
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.930633 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp"
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.949377 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-v7hlb" event={"ID":"bb27e0bb-49da-4e6c-a7f7-e8addc64b009","Type":"ContainerStarted","Data":"fedb6827ef795384500f64310f3373086c2f30d92c47b8d6c38233ede4cade93"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.949423 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-v7hlb" event={"ID":"bb27e0bb-49da-4e6c-a7f7-e8addc64b009","Type":"ContainerStarted","Data":"841443bdcec5f7a44b05becc5ab4af2a1a5172b9a54307eb6e342c88a0d312c8"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.954319 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" podStartSLOduration=141.954304871 podStartE2EDuration="2m21.954304871s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:53.95325052 +0000 UTC m=+169.731072184" watchObservedRunningTime="2026-01-30 11:58:53.954304871 +0000 UTC m=+169.732126515"
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.954687 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bth9p" podStartSLOduration=141.954683101 podStartE2EDuration="2m21.954683101s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:53.903501028 +0000 UTC m=+169.681322682" watchObservedRunningTime="2026-01-30 11:58:53.954683101 +0000 UTC m=+169.732504755"
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.957288 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-mxjx6" event={"ID":"aa4aaaa5-396e-4e62-92a3-74b835af58a7","Type":"ContainerStarted","Data":"70dde461d8ef0383724a92d817faba63e7464a6eeb43fe4193addee302ed5001"}
Jan 30 11:58:53 crc kubenswrapper[4703]: I0130 11:58:53.978284 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:58:54 crc kubenswrapper[4703]: E0130 11:58:53.979675 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:54.479660211 +0000 UTC m=+170.257481865 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:54 crc kubenswrapper[4703]: E0130 11:58:54.083776 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:54.583760097 +0000 UTC m=+170.361581751 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.093335 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pvx9n" event={"ID":"115e8738-ea93-49f0-a85b-9c59933c940c","Type":"ContainerStarted","Data":"39aebf1c11872a1e68f7c733c6e41403b251634edcd734ce8d17231e39a9d293"}
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.093742 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.119427 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x56jp" event={"ID":"1ac1debf-d20c-4a3b-abec-64c6e317cb69","Type":"ContainerStarted","Data":"b97af0e64b6976f4f500417a206f2182dfdb43bcc7790a493303addac4889dc7"}
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.132677 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-sn2fc" event={"ID":"33681525-5de5-4f9e-a774-809bd0603c5a","Type":"ContainerStarted","Data":"7837dd56efc6cf8b660ba81a815a88c1c51a05a862376dcbd17dfcbf8046c508"}
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.141304 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-v7hlb" podStartSLOduration=142.141288761 podStartE2EDuration="2m22.141288761s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:54.133053768 +0000 UTC m=+169.910875422" watchObservedRunningTime="2026-01-30 11:58:54.141288761 +0000 UTC m=+169.919110415"
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.145471 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4c5n9" event={"ID":"a6520b48-04e3-4b95-8c18-0c51e2e41566","Type":"ContainerStarted","Data":"492f0e77b4be4cc6043337827cd9c9246eb4cee63b2c20bb35195fe8c1f94b5d"}
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.506854 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:58:54 crc kubenswrapper[4703]: E0130 11:58:54.507713 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:55.007690179 +0000 UTC m=+170.785511873 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.507756 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:58:54 crc kubenswrapper[4703]: E0130 11:58:54.514147 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:55.014115862 +0000 UTC m=+170.791937516 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.526673 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf" event={"ID":"2ccbee61-4f6b-4070-ae06-071f1062ad2f","Type":"ContainerStarted","Data":"913f5273533d3b8f03ed521429a9b3a9e3327b05d5c5ea15c2cdb8dbd6c0dfda"}
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.526725 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf" event={"ID":"2ccbee61-4f6b-4070-ae06-071f1062ad2f","Type":"ContainerStarted","Data":"ae45380d5dc8583fe62b6822baf48b5534ee8053e3244298e6a619e4c73f11b7"}
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.527707 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf"
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.533437 4703 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-22qwf container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body=
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.533487 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf" podUID="2ccbee61-4f6b-4070-ae06-071f1062ad2f" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused"
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.533837 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-30 11:53:53 +0000 UTC, rotation deadline is 2026-10-19 11:37:19.728574493 +0000 UTC
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.533855 4703 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 6287h38m25.19472224s for next certificate rotation
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.539291 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-gkllf" event={"ID":"28191393-c4b1-4d80-9994-ca31868c9fb4","Type":"ContainerStarted","Data":"94a2c218c9b7b16ecd7390b989cdeb44794f39fa5b9c93949b04bcbff1b7f70e"}
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.547935 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-9pqdt" event={"ID":"8a7284fa-8f0a-461c-9753-4636c4f6e3f0","Type":"ContainerStarted","Data":"43a96c8bd247f6e10d1d36fc3e25aabc9d582f382d47ec641ddddad2cf1bc919"}
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.553848 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj" event={"ID":"988a730f-bd1f-45b8-97a9-c14bea7d749e","Type":"ContainerStarted","Data":"b4487e2671579fd6dd4f7763f356f9134050ca59f8413a4247db18a53e863249"}
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.554761 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj"
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.558156 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5" event={"ID":"ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3","Type":"ContainerStarted","Data":"c78612afe24c3e0792dd637639bf7736862789218426c7cf504cb290b7c08378"}
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.561289 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qwk4" event={"ID":"923a63f2-7c6f-4c55-a80f-ecf12184d88f","Type":"ContainerStarted","Data":"72fc3f1fd55ed6f19f1951fd8cf7e018242ac7c09d92de2850d2f364c604c576"}
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.569577 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-5zq9g" event={"ID":"61fedc4c-7586-4cdc-b38e-be8a6f3762a1","Type":"ContainerStarted","Data":"040d0b874db5aa30a27f75e51055b4f0da904cd99ac35ef57d97fa4a8e238d44"}
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.572428 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4xxv7" event={"ID":"3164cd87-2399-4b8f-8314-f663f2fc0b52","Type":"ContainerStarted","Data":"bfa2b31780029ddaa999bcdc3ac2482d22562bbefa49ad6b76d438d68130deba"}
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.573488 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pvx9n" podStartSLOduration=142.573473418 podStartE2EDuration="2m22.573473418s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:54.549816286 +0000 UTC m=+170.327637940" watchObservedRunningTime="2026-01-30 11:58:54.573473418 +0000 UTC m=+170.351295072"
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.575707 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-9pqdt" podStartSLOduration=142.575701362 podStartE2EDuration="2m22.575701362s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:54.573939111 +0000 UTC m=+170.351760785" watchObservedRunningTime="2026-01-30 11:58:54.575701362 +0000 UTC m=+170.353523016"
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.577295 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:58:54 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:58:54 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:58:54 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.578989 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.588624 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg"
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.614535 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:58:54 crc kubenswrapper[4703]: E0130 11:58:54.615096 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:55.115056629 +0000 UTC m=+170.892878283 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.618310 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf" podStartSLOduration=142.61826322 podStartE2EDuration="2m22.61826322s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:54.610778708 +0000 UTC m=+170.388600362" watchObservedRunningTime="2026-01-30 11:58:54.61826322 +0000 UTC m=+170.396084884"
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.623099 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:58:54 crc kubenswrapper[4703]: E0130 11:58:54.633193 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:55.133158194 +0000 UTC m=+170.910982008 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.675490 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5" podStartSLOduration=142.675474835 podStartE2EDuration="2m22.675474835s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:54.671653576 +0000 UTC m=+170.449475230" watchObservedRunningTime="2026-01-30 11:58:54.675474835 +0000 UTC m=+170.453296489"
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.724002 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:58:54 crc kubenswrapper[4703]: E0130 11:58:54.725063 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:55.225035643 +0000 UTC m=+171.002857297 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.828697 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:58:54 crc kubenswrapper[4703]: E0130 11:58:54.829165 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:55.329149981 +0000 UTC m=+171.106971635 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.929489 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:58:54 crc kubenswrapper[4703]: E0130 11:58:54.929787 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:55.429751429 +0000 UTC m=+171.207573073 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:54 crc kubenswrapper[4703]: I0130 11:58:54.929854 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:58:54 crc kubenswrapper[4703]: E0130 11:58:54.930603 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:55.430589982 +0000 UTC m=+171.208411636 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.032587 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:58:55 crc kubenswrapper[4703]: E0130 11:58:55.032826 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:55.532803055 +0000 UTC m=+171.310624709 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.032864 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:58:55 crc kubenswrapper[4703]: E0130 11:58:55.033324 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:55.53331557 +0000 UTC m=+171.311137224 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.044700 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf"
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.049841 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj" podStartSLOduration=143.049819768 podStartE2EDuration="2m23.049819768s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:55.033063482 +0000 UTC m=+170.810885136" watchObservedRunningTime="2026-01-30 11:58:55.049819768 +0000 UTC m=+170.827641422"
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.123360 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8"
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.133703 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:58:55 crc kubenswrapper[4703]: E0130 11:58:55.135072 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:55.635052579 +0000 UTC m=+171.412874243 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.261307 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:58:55 crc kubenswrapper[4703]: E0130 11:58:55.261837 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:55.761819171 +0000 UTC m=+171.539640825 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.438237 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:58:55 crc kubenswrapper[4703]: E0130 11:58:55.444730 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:55.944698715 +0000 UTC m=+171.722520359 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.559634 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:58:55 crc kubenswrapper[4703]: E0130 11:58:55.560047 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:56.060031781 +0000 UTC m=+171.837853445 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.657656 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:58:55 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:58:55 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:58:55 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.657776 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.660487 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:58:55 crc kubenswrapper[4703]: E0130 11:58:55.661096 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:56.161074821 +0000 UTC m=+171.938896475 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.757867 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-xfvl5" event={"ID":"ebd3d4bc-ebd0-4435-b4c0-562043a8b0c3","Type":"ContainerStarted","Data":"d855d84e255d6152343180971549017cc7c9c34b68ceae1c73a019930a1a866d"}
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.765675 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:58:55 crc kubenswrapper[4703]: E0130 11:58:55.773184 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:56.273153625 +0000 UTC m=+172.050975279 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.791435 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qwk4" event={"ID":"923a63f2-7c6f-4c55-a80f-ecf12184d88f","Type":"ContainerStarted","Data":"bd5437768ddf8346980226ff948ad0747ea7201f549cb64f776970830e5da084"}
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.867201 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:58:55 crc kubenswrapper[4703]: E0130 11:58:55.867561 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:56.367545896 +0000 UTC m=+172.145367550 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.892414 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-pvx9n" event={"ID":"115e8738-ea93-49f0-a85b-9c59933c940c","Type":"ContainerStarted","Data":"4ca631b01ce2c9cce62bce71af778b35705fb85c29fd222fe661d3d50a6c2c10"}
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.931031 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-b8qs4" event={"ID":"575b58cd-4c7e-4d84-bf3e-182ebe6232e8","Type":"ContainerStarted","Data":"2dc259a01d9435b3a55fcbd2ae4a911644c941ce78811535c174f63c734d7008"}
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.942618 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-9pqdt" event={"ID":"8a7284fa-8f0a-461c-9753-4636c4f6e3f0","Type":"ContainerStarted","Data":"a6b1a93c85398b09c1aca24fb138c18b266a748146abbeadf84e8561a68b42a4"}
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.955808 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" event={"ID":"194f460d-e3a1-409f-b6ca-7338f62025c0","Type":"ContainerStarted","Data":"bc8cfde982795cab1098aa826d62a11fae9a19814d92f3adb4801a2119d1d3ea"}
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.958292 4703 patch_prober.go:28] interesting pod/console-operator-58897d9998-zwbps container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body=
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.958354 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zwbps" podUID="64f5798c-a6f3-4a7e-9b16-f3039aab5a23" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused"
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.959302 4703 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-l4tvp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" start-of-body=
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.959331 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" podUID="194f460d-e3a1-409f-b6ca-7338f62025c0" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused"
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.960084 4703 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-22qwf container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused" start-of-body=
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.960137 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf" podUID="2ccbee61-4f6b-4070-ae06-071f1062ad2f" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.41:8443/healthz\": dial tcp 10.217.0.41:8443: connect: connection refused"
Jan 30 11:58:55 crc kubenswrapper[4703]: I0130 11:58:55.969072 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:58:55 crc kubenswrapper[4703]: E0130 11:58:55.976556 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:56.476537672 +0000 UTC m=+172.254359336 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:56 crc kubenswrapper[4703]: I0130 11:58:56.099514 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:58:56 crc kubenswrapper[4703]: I0130 11:58:56.099790 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs\") pod \"network-metrics-daemon-qrt92\" (UID: \"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\") " pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:58:56 crc kubenswrapper[4703]: E0130 11:58:56.099879 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:56.599850834 +0000 UTC m=+172.377672488 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:56 crc kubenswrapper[4703]: I0130 11:58:56.099982 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:58:56 crc kubenswrapper[4703]: E0130 11:58:56.101285 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:56.601272725 +0000 UTC m=+172.379094379 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:56 crc kubenswrapper[4703]: I0130 11:58:56.141595 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd-metrics-certs\") pod \"network-metrics-daemon-qrt92\" (UID: \"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd\") " pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:58:56 crc kubenswrapper[4703]: I0130 11:58:56.201022 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:58:56 crc kubenswrapper[4703]: E0130 11:58:56.201374 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:56.701355598 +0000 UTC m=+172.479177252 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:56 crc kubenswrapper[4703]: I0130 11:58:56.232528 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qrt92"
Jan 30 11:58:56 crc kubenswrapper[4703]: I0130 11:58:56.302698 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:58:56 crc kubenswrapper[4703]: E0130 11:58:56.303162 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:56.803144499 +0000 UTC m=+172.580966153 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:56 crc kubenswrapper[4703]: I0130 11:58:56.403923 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:58:56 crc kubenswrapper[4703]: E0130 11:58:56.404385 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:56.904365665 +0000 UTC m=+172.682187329 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:56 crc kubenswrapper[4703]: I0130 11:58:56.418949 4703 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-g5vsj container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Jan 30 11:58:56 crc kubenswrapper[4703]: I0130 11:58:56.418991 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj" podUID="988a730f-bd1f-45b8-97a9-c14bea7d749e" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" Jan 30 11:58:56 crc kubenswrapper[4703]: I0130 11:58:56.419019 4703 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-g5vsj container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Jan 30 11:58:56 crc kubenswrapper[4703]: I0130 11:58:56.419068 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj" podUID="988a730f-bd1f-45b8-97a9-c14bea7d749e" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" Jan 30 11:58:56 crc kubenswrapper[4703]: I0130 11:58:56.530345 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:56 crc kubenswrapper[4703]: E0130 11:58:56.531034 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:57.031017912 +0000 UTC m=+172.808839566 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:56 crc kubenswrapper[4703]: I0130 11:58:56.571738 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 11:58:56 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld Jan 30 11:58:56 crc kubenswrapper[4703]: [+]process-running ok Jan 30 11:58:56 crc kubenswrapper[4703]: healthz check failed Jan 30 11:58:56 crc kubenswrapper[4703]: I0130 11:58:56.571813 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 11:58:56 crc kubenswrapper[4703]: I0130 11:58:56.644645 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:56 crc kubenswrapper[4703]: E0130 11:58:56.644988 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:57.144972279 +0000 UTC m=+172.922793933 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:56 crc kubenswrapper[4703]: I0130 11:58:56.751524 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:56 crc kubenswrapper[4703]: E0130 11:58:56.903730 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:57.403699248 +0000 UTC m=+173.181520902 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.000515 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:57 crc kubenswrapper[4703]: E0130 11:58:57.001068 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:57.501036383 +0000 UTC m=+173.278858057 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.090527 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-jn8hp" event={"ID":"e0ce7c98-966a-4d09-b401-f6cb42c1b08e","Type":"ContainerStarted","Data":"35a28d7f90b8eca3ccf8b7119e4ba3b8b860e1170060d52c4ddf66bc21d058c9"} Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.102981 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:57 crc kubenswrapper[4703]: E0130 11:58:57.103378 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:57.6033659 +0000 UTC m=+173.381187554 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.166615 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4xxv7" event={"ID":"3164cd87-2399-4b8f-8314-f663f2fc0b52","Type":"ContainerStarted","Data":"1f3da0684392343d786cea047e5ab4dd21c87d9a7fc5b565e12c1b7751986a54"} Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.205016 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:57 crc kubenswrapper[4703]: E0130 11:58:57.205485 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:57.70546335 +0000 UTC m=+173.483285004 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.226839 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x56jp" event={"ID":"1ac1debf-d20c-4a3b-abec-64c6e317cb69","Type":"ContainerStarted","Data":"713c9c7954a56bbcedc9b7320b94379b8e926fc8a47c0a8afa618aa6fe0b9a8f"} Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.260567 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-x56jp" podStartSLOduration=145.260544434 podStartE2EDuration="2m25.260544434s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:57.234995929 +0000 UTC m=+173.012817583" watchObservedRunningTime="2026-01-30 11:58:57.260544434 +0000 UTC m=+173.038366078" Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.260851 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-qrt92"] Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.291846 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-92h6w" event={"ID":"40e464c0-96c5-4c69-8537-ef29b93319ab","Type":"ContainerStarted","Data":"6732b7e72177bbc55623f0bbba4cad865017d1cc573d693e00b1cbd4c312affa"} Jan 30 11:58:57 crc 
kubenswrapper[4703]: I0130 11:58:57.324741 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:57 crc kubenswrapper[4703]: E0130 11:58:57.325438 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:57.825421188 +0000 UTC m=+173.603242832 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.383460 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.383823 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.402346 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.402406 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.402421 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.402484 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.426451 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:57 crc kubenswrapper[4703]: E0130 11:58:57.427816 4703 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:57.927797856 +0000 UTC m=+173.705619520 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.432437 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-zlb62" event={"ID":"b4ff5527-1a84-4f96-a063-b74f0e220eb9","Type":"ContainerStarted","Data":"091f97a7f1db866cd860e5bf01614a865b801fc911ba79389169a8dd6c48e4ea"} Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.433358 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-zlb62" Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.456662 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4c5n9" event={"ID":"a6520b48-04e3-4b95-8c18-0c51e2e41566","Type":"ContainerStarted","Data":"8bb14d0606b5470e2db0e1d40c91e3e9eef602ad948a848888d1d8a9fc47060e"} Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.460587 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d" event={"ID":"0c06264a-b3b5-4784-9ed9-05fb2c937105","Type":"ContainerStarted","Data":"55575e45e5e480a0ac7fa4aba18033645c0c41f1ef88e48049fd45a597cdbe7d"} Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.465395 4703 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-l4tvp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" start-of-body= Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.465593 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" podUID="194f460d-e3a1-409f-b6ca-7338f62025c0" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.491855 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-22qwf" Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.536880 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:57 crc kubenswrapper[4703]: E0130 11:58:57.550515 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:58.05049524 +0000 UTC m=+173.828316894 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.617401 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789" Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.636199 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 11:58:57 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld Jan 30 11:58:57 crc kubenswrapper[4703]: [+]process-running ok Jan 30 11:58:57 crc kubenswrapper[4703]: healthz check failed Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.636259 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.638068 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:57 crc kubenswrapper[4703]: E0130 11:58:57.638562 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:58.138544801 +0000 UTC m=+173.916366455 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.662731 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-92h6w" podStartSLOduration=145.662713288 podStartE2EDuration="2m25.662713288s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:57.660618939 +0000 UTC m=+173.438440593" watchObservedRunningTime="2026-01-30 11:58:57.662713288 +0000 UTC m=+173.440534942" Jan 30 11:58:57 crc kubenswrapper[4703]: W0130 11:58:57.664959 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podad2f7dbf_9ce2_4f98_b62f_6c608184a6fd.slice/crio-e46d14bf0e7329d9bfc80e216f2732121039f717ed44b246307f3bdd94f5aa5a WatchSource:0}: Error finding container e46d14bf0e7329d9bfc80e216f2732121039f717ed44b246307f3bdd94f5aa5a: Status 404 returned error can't find the container with id e46d14bf0e7329d9bfc80e216f2732121039f717ed44b246307f3bdd94f5aa5a Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.752238 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:57 crc kubenswrapper[4703]: E0130 11:58:57.753097 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:58.253070035 +0000 UTC m=+174.030891689 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:57 crc kubenswrapper[4703]: I0130 11:58:57.872623 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:57 crc kubenswrapper[4703]: E0130 11:58:57.873036 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-30 11:58:58.373015752 +0000 UTC m=+174.150837406 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:57.982440 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:58 crc kubenswrapper[4703]: E0130 11:58:57.982987 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:58.482968855 +0000 UTC m=+174.260790509 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:57.984620 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-zlb62" podStartSLOduration=12.984596801 podStartE2EDuration="12.984596801s" podCreationTimestamp="2026-01-30 11:58:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:57.983345385 +0000 UTC m=+173.761167049" watchObservedRunningTime="2026-01-30 11:58:57.984596801 +0000 UTC m=+173.762418455" Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.087269 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:58 crc kubenswrapper[4703]: E0130 11:58:58.087757 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:58.587730411 +0000 UTC m=+174.365552065 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.192534 4703 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-l4tvp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" start-of-body= Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.192613 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" podUID="194f460d-e3a1-409f-b6ca-7338f62025c0" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.192535 4703 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-l4tvp container/catalog-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" start-of-body= Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.192689 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" podUID="194f460d-e3a1-409f-b6ca-7338f62025c0" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.193045 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:58 crc kubenswrapper[4703]: E0130 11:58:58.193668 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:58.69365344 +0000 UTC m=+174.471475094 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.342071 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:58 crc kubenswrapper[4703]: E0130 11:58:58.342662 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:58.842642572 +0000 UTC m=+174.620464226 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.352021 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxv85" podStartSLOduration=146.352002308 podStartE2EDuration="2m26.352002308s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:58.232317937 +0000 UTC m=+174.010139591" watchObservedRunningTime="2026-01-30 11:58:58.352002308 +0000 UTC m=+174.129823962" Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.494095 4703 patch_prober.go:28] interesting pod/console-operator-58897d9998-zwbps container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.494493 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zwbps" podUID="64f5798c-a6f3-4a7e-9b16-f3039aab5a23" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.9:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.494589 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:58 crc kubenswrapper[4703]: E0130 11:58:58.495203 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:58.995190345 +0000 UTC m=+174.773011999 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.603432 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.603729 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-qzs78" Jan 30 11:58:58 crc kubenswrapper[4703]: E0130 11:58:58.603866 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:59.103851181 +0000 UTC m=+174.881672835 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.692215 4703 patch_prober.go:28] interesting pod/console-operator-58897d9998-zwbps container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.692259 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-zwbps" podUID="64f5798c-a6f3-4a7e-9b16-f3039aab5a23" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.695169 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 11:58:58 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld Jan 30 11:58:58 crc kubenswrapper[4703]: [+]process-running ok Jan 30 11:58:58 crc kubenswrapper[4703]: healthz check failed Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.695223 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.705281 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:58 crc kubenswrapper[4703]: E0130 11:58:58.705819 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:59.205798906 +0000 UTC m=+174.983620560 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.715010 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-v286d" event={"ID":"66ebbbe6-076c-4d9b-a2e2-5aea0188b8bb","Type":"ContainerStarted","Data":"cb492ae80d7337f4c95972f01061c16cfe4465025d4b3b9ea734dd661311b0ef"} Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.725621 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" event={"ID":"26730f5b-96f6-40f6-ab66-0500a306f988","Type":"ContainerStarted","Data":"faa23bfdc48b30d1d1dd1a23b9a6735ff7b13b8a16d2d63ba105d51c85c0d076"} Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.729344 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.729537 4703 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-wd7g9 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.729590 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" podUID="26730f5b-96f6-40f6-ab66-0500a306f988" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.835238 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:58 crc kubenswrapper[4703]: E0130 11:58:58.835903 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:59.335885762 +0000 UTC m=+175.113707416 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.837427 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-4c5n9" podStartSLOduration=147.837402525 podStartE2EDuration="2m27.837402525s" podCreationTimestamp="2026-01-30 11:56:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:58.834752161 +0000 UTC m=+174.612573815" watchObservedRunningTime="2026-01-30 11:58:58.837402525 +0000 UTC m=+174.615224179" Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.841491 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:58 crc kubenswrapper[4703]: E0130 11:58:58.843436 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:59.343422806 +0000 UTC m=+175.121244460 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.846039 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4xxv7" event={"ID":"3164cd87-2399-4b8f-8314-f663f2fc0b52","Type":"ContainerStarted","Data":"999879dcb24a54e080e0c36405c39bfb775df58e16664b8792dc5e369649014d"} Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.846681 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4xxv7" Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.848305 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" event={"ID":"897e6904-d07b-468a-9eff-504d8ce80db1","Type":"ContainerStarted","Data":"1a2a44aae54d293ede4b4fe01227b62dd549ed075e7f7a403523f018c0e2fea7"} Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.935204 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" podStartSLOduration=146.935190083 podStartE2EDuration="2m26.935190083s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:58.934403691 +0000 UTC m=+174.712225345" watchObservedRunningTime="2026-01-30 11:58:58.935190083 +0000 UTC m=+174.713011727" Jan 30 11:58:58 crc kubenswrapper[4703]: I0130 11:58:58.966542 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:58 crc kubenswrapper[4703]: E0130 11:58:58.967797 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:59.467778139 +0000 UTC m=+175.245599793 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.001401 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-sn2fc" event={"ID":"33681525-5de5-4f9e-a774-809bd0603c5a","Type":"ContainerStarted","Data":"0e4fd892eb47bb5dbee19ba095901af9aa6592a66325927054affcd46f539875"} Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.023553 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-v286d" podStartSLOduration=147.023531872 podStartE2EDuration="2m27.023531872s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:59.02134514 +0000 UTC m=+174.799166814" watchObservedRunningTime="2026-01-30 11:58:59.023531872 +0000 UTC m=+174.801353526" Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.081596 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:58:59 crc kubenswrapper[4703]: E0130 11:58:59.082652 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:59.58259981 +0000 UTC m=+175.360421464 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.085345 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-5zq9g" event={"ID":"61fedc4c-7586-4cdc-b38e-be8a6f3762a1","Type":"ContainerStarted","Data":"b8e3e42afd557f832a47868addea1143f05739942338c6a3a94e6d8680426c8a"} Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.112374 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786" event={"ID":"2eaf3e49-2394-495c-926e-5504ff81ccc5","Type":"ContainerStarted","Data":"854d15c00e6754d489a505fd2e9138494328c64efebf46f08e1c4a3040ebbed5"} Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.125593 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-xmjbx" event={"ID":"c271501b-03fc-4c15-b1d7-e705cbcf16eb","Type":"ContainerStarted","Data":"e9f78e7235c2a9860e07b3028a65d23a105153a940c1be7cf1ba1b2d586279c3"} Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.184300 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:58:59 crc kubenswrapper[4703]: E0130 11:58:59.186224 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:59.686207804 +0000 UTC m=+175.464029468 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.265331 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-sxv85" event={"ID":"47544af6-cdac-444e-9c16-37ded5e11e28","Type":"ContainerStarted","Data":"1ae640b9e65749b40c5c8e30ab03f00c09dda8caa43084b143ebad5509533629"}
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.274515 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v" event={"ID":"2b49de6a-eb13-487d-b779-6cad65bbbc33","Type":"ContainerStarted","Data":"7306c1cb54e4c8043a422588ff46cfa290a8cbf6a364f031241109ae01d05392"}
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.288490 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:58:59 crc kubenswrapper[4703]: E0130 11:58:59.289604 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:58:59.789591909 +0000 UTC m=+175.567413563 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.299303 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" event={"ID":"de735d2c-8817-415e-b190-df96bf922407","Type":"ContainerStarted","Data":"6ed753559dbbf50bcd54da3979f51b370589030aa99ce72a6d8748a279fd3be6"}
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.303874 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qwk4" event={"ID":"923a63f2-7c6f-4c55-a80f-ecf12184d88f","Type":"ContainerStarted","Data":"e61e8c116e59c0751a581fcfd47ff3b7877ab810b72a7f9a272982e5cbbbfd9f"}
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.306733 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d" event={"ID":"0c06264a-b3b5-4784-9ed9-05fb2c937105","Type":"ContainerStarted","Data":"dadc38a7140687e726bdaa4b26dc26387bb81d55f20d51d720a9f450866784d7"}
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.380464 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" event={"ID":"8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba","Type":"ContainerStarted","Data":"dbe330488a4bee8734a96963a6674b5a85466ddcd0642090e6b58d1f24494ddb"}
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.381681 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz"
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.381718 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4xxv7" podStartSLOduration=147.381698755 podStartE2EDuration="2m27.381698755s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:59.381246553 +0000 UTC m=+175.159068207" watchObservedRunningTime="2026-01-30 11:58:59.381698755 +0000 UTC m=+175.159520409"
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.382670 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-vt7hb" podStartSLOduration=147.382665133 podStartE2EDuration="2m27.382665133s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:59.234112274 +0000 UTC m=+175.011933928" watchObservedRunningTime="2026-01-30 11:58:59.382665133 +0000 UTC m=+175.160486777"
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.384061 4703 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-9lxbz container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:5443/healthz\": dial tcp 10.217.0.21:5443: connect: connection refused" start-of-body=
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.384149 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" podUID="8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.21:5443/healthz\": dial tcp 10.217.0.21:5443: connect: connection refused"
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.391516 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:58:59 crc kubenswrapper[4703]: E0130 11:58:59.392875 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:58:59.892837392 +0000 UTC m=+175.670659046 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.394355 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" event={"ID":"56158b04-1a02-453d-b48a-a107343a3955","Type":"ContainerStarted","Data":"dbeece1e92f2d9f2c556e8d001ffd7952f1584801fd06fe35cd04bb77ef31785"}
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.395320 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6"
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.408465 4703 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-d4tv6 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.6:6443/healthz\": dial tcp 10.217.0.6:6443: connect: connection refused" start-of-body=
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.408576 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" podUID="56158b04-1a02-453d-b48a-a107343a3955" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.6:6443/healthz\": dial tcp 10.217.0.6:6443: connect: connection refused"
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.409219 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-sn2fc" podStartSLOduration=147.409191727 podStartE2EDuration="2m27.409191727s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:59.406518461 +0000 UTC m=+175.184340125" watchObservedRunningTime="2026-01-30 11:58:59.409191727 +0000 UTC m=+175.187013381"
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.467076 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-qrt92" event={"ID":"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd","Type":"ContainerStarted","Data":"e46d14bf0e7329d9bfc80e216f2732121039f717ed44b246307f3bdd94f5aa5a"}
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.476149 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-g5vsj"
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.486812 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-5zq9g" podStartSLOduration=14.486792981 podStartE2EDuration="14.486792981s" podCreationTimestamp="2026-01-30 11:58:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:59.484521457 +0000 UTC m=+175.262343111" watchObservedRunningTime="2026-01-30 11:58:59.486792981 +0000 UTC m=+175.264614635"
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.493233 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:58:59 crc kubenswrapper[4703]: E0130 11:58:59.564262 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:00.064227451 +0000 UTC m=+175.842049105 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.564702 4703 generic.go:334] "Generic (PLEG): container finished" podID="28191393-c4b1-4d80-9994-ca31868c9fb4" containerID="8b3edfc79c4a78efa8c764583e71c43a702cd43ae59befd9d795757d5ab6147f" exitCode=0
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.564866 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-gkllf" event={"ID":"28191393-c4b1-4d80-9994-ca31868c9fb4","Type":"ContainerDied","Data":"8b3edfc79c4a78efa8c764583e71c43a702cd43ae59befd9d795757d5ab6147f"}
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.566331 4703 patch_prober.go:28] interesting pod/console-operator-58897d9998-zwbps container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.566382 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zwbps" podUID="64f5798c-a6f3-4a7e-9b16-f3039aab5a23" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.9:8443/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.592575 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:58:59 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:58:59 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:58:59 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.593023 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.594928 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:58:59 crc kubenswrapper[4703]: E0130 11:58:59.600138 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:00.100087299 +0000 UTC m=+175.877908953 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.603643 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-b8qs4" event={"ID":"575b58cd-4c7e-4d84-bf3e-182ebe6232e8","Type":"ContainerStarted","Data":"9db2fa053d9020179bdb589d4970c1e04144d766cca2924d469442ccdf696d34"}
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.629976 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" podStartSLOduration=148.629954198 podStartE2EDuration="2m28.629954198s" podCreationTimestamp="2026-01-30 11:56:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:59.605440041 +0000 UTC m=+175.383261695" watchObservedRunningTime="2026-01-30 11:58:59.629954198 +0000 UTC m=+175.407775852"
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.666074 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-mxjx6" event={"ID":"aa4aaaa5-396e-4e62-92a3-74b835af58a7","Type":"ContainerStarted","Data":"935eb1d863568ff69429b034680d848c693ddc18879d2ee1a12e82cdb2781307"}
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.707884 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-cz789"
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.708673 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:58:59 crc kubenswrapper[4703]: E0130 11:58:59.711700 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:00.211685709 +0000 UTC m=+175.989507363 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.728819 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-8tl5d" podStartSLOduration=147.728792565 podStartE2EDuration="2m27.728792565s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:59.664776136 +0000 UTC m=+175.442597790" watchObservedRunningTime="2026-01-30 11:58:59.728792565 +0000 UTC m=+175.506614219"
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.771186 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786" podStartSLOduration=147.771162768 podStartE2EDuration="2m27.771162768s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:59.730616436 +0000 UTC m=+175.508438090" watchObservedRunningTime="2026-01-30 11:58:59.771162768 +0000 UTC m=+175.548984422"
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.812242 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:58:59 crc kubenswrapper[4703]: E0130 11:58:59.812507 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:00.312492413 +0000 UTC m=+176.090314057 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:58:59 crc kubenswrapper[4703]: I0130 11:58:59.913798 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:59:00 crc kubenswrapper[4703]: E0130 11:58:59.914191 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:00.41417823 +0000 UTC m=+176.191999874 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:00 crc kubenswrapper[4703]: I0130 11:59:00.014699 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:59:00 crc kubenswrapper[4703]: E0130 11:59:00.015261 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:00.515239722 +0000 UTC m=+176.293061386 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:00 crc kubenswrapper[4703]: I0130 11:59:00.030855 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8qwk4" podStartSLOduration=148.030835135 podStartE2EDuration="2m28.030835135s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:58:59.892101604 +0000 UTC m=+175.669923258" watchObservedRunningTime="2026-01-30 11:59:00.030835135 +0000 UTC m=+175.808656789"
Jan 30 11:59:00 crc kubenswrapper[4703]: I0130 11:59:00.089071 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" podStartSLOduration=148.089051628 podStartE2EDuration="2m28.089051628s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:59:00.036274159 +0000 UTC m=+175.814095813" watchObservedRunningTime="2026-01-30 11:59:00.089051628 +0000 UTC m=+175.866873272"
Jan 30 11:59:00 crc kubenswrapper[4703]: I0130 11:59:00.119636 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:59:00 crc kubenswrapper[4703]: E0130 11:59:00.120272 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:00.620255905 +0000 UTC m=+176.398077559 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:00 crc kubenswrapper[4703]: I0130 11:59:00.221558 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:59:00 crc kubenswrapper[4703]: E0130 11:59:00.221748 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:00.721719796 +0000 UTC m=+176.499541450 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:00 crc kubenswrapper[4703]: I0130 11:59:00.221864 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:59:00 crc kubenswrapper[4703]: E0130 11:59:00.222339 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:00.722331534 +0000 UTC m=+176.500153188 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:00 crc kubenswrapper[4703]: I0130 11:59:00.229609 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-mxjx6" podStartSLOduration=148.22958873 podStartE2EDuration="2m28.22958873s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:59:00.114286985 +0000 UTC m=+175.892108639" watchObservedRunningTime="2026-01-30 11:59:00.22958873 +0000 UTC m=+176.007410384"
Jan 30 11:59:00 crc kubenswrapper[4703]: E0130 11:59:00.354359 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:00.854316868 +0000 UTC m=+176.632138522 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:00 crc kubenswrapper[4703]: I0130 11:59:00.355962 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:59:00 crc kubenswrapper[4703]: I0130 11:59:00.362653 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:59:00 crc kubenswrapper[4703]: E0130 11:59:00.363162 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:00.863146269 +0000 UTC m=+176.640967923 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:00 crc kubenswrapper[4703]: I0130 11:59:00.464002 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:59:00 crc kubenswrapper[4703]: E0130 11:59:00.464556 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:00.964535146 +0000 UTC m=+176.742356800 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:00 crc kubenswrapper[4703]: I0130 11:59:00.547013 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:00 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:59:00 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:00 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:00 crc kubenswrapper[4703]: I0130 11:59:00.547057 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:59:00 crc kubenswrapper[4703]: I0130 11:59:00.566222 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:59:00 crc kubenswrapper[4703]: E0130 11:59:00.566607 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:01.066595582 +0000 UTC m=+176.844417226 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:00 crc kubenswrapper[4703]: I0130 11:59:00.667416 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:59:00 crc kubenswrapper[4703]: E0130 11:59:00.668093 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:01.168073815 +0000 UTC m=+176.945895469 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:00 crc kubenswrapper[4703]: I0130 11:59:00.787075 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:59:00 crc kubenswrapper[4703]: E0130 11:59:00.787527 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:01.287512648 +0000 UTC m=+177.065334302 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:00 crc kubenswrapper[4703]: I0130 11:59:00.891409 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:59:00 crc kubenswrapper[4703]: E0130 11:59:00.891705 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:01.391682967 +0000 UTC m=+177.169504621 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:00 crc kubenswrapper[4703]: I0130 11:59:00.917546 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-jn8hp" event={"ID":"e0ce7c98-966a-4d09-b401-f6cb42c1b08e","Type":"ContainerStarted","Data":"c7b57cecbca3b2dddc96568619b1a9f945d44032b30e6ce14584fcc2d2bb304f"}
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.000688 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:59:01 crc kubenswrapper[4703]: E0130 11:59:01.001136 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:01.501105065 +0000 UTC m=+177.278926739 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.002493 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-xmjbx" event={"ID":"c271501b-03fc-4c15-b1d7-e705cbcf16eb","Type":"ContainerStarted","Data":"da334cd3d731c22a6a0e9e63f1bd3c4859e7cabe0b0875ac5d21c688b888264c"}
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.012639 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v" event={"ID":"2b49de6a-eb13-487d-b779-6cad65bbbc33","Type":"ContainerStarted","Data":"da1f26dfd8321d12a729693fb14ff11779187493da0932c4825a7dc3403dd928"}
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.021722 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-qrt92" event={"ID":"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd","Type":"ContainerStarted","Data":"64f0a0028941cdca9646aa001c0e411293e1f8a3642d033c9b4dec2dcc141a27"}
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.021768 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-qrt92" event={"ID":"ad2f7dbf-9ce2-4f98-b62f-6c608184a6fd","Type":"ContainerStarted","Data":"d544b064a0a3993bc0d4d1e565a0e0f3e6d715ba9cde23fbf2f9f687b57af490"}
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.044945 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-gkllf" event={"ID":"28191393-c4b1-4d80-9994-ca31868c9fb4","Type":"ContainerStarted","Data":"196b55c3135a95cf0e8fa700de58fd7d142be459c0b571c66fb955c5cd529405"}
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.060469 4703 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-9lxbz container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:5443/healthz\": dial tcp 10.217.0.21:5443: connect: connection refused" start-of-body=
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.060554 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" podUID="8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.21:5443/healthz\": dial tcp 10.217.0.21:5443: connect: connection refused"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.061344 4703 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-d4tv6 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.6:6443/healthz\": dial tcp 10.217.0.6:6443: connect: connection refused" start-of-body=
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.061404 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" podUID="56158b04-1a02-453d-b48a-a107343a3955" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.6:6443/healthz\": dial tcp 10.217.0.6:6443: connect: connection refused"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.061506 4703 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-wd7g9 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body=
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.061531 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" podUID="26730f5b-96f6-40f6-ab66-0500a306f988" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.104046 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:59:01 crc kubenswrapper[4703]: E0130 11:59:01.106397 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:01.606373595 +0000 UTC m=+177.384195249 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.189607 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-jn8hp" podStartSLOduration=149.189587659 podStartE2EDuration="2m29.189587659s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:59:01.163361244 +0000 UTC m=+176.941182908" watchObservedRunningTime="2026-01-30 11:59:01.189587659 +0000 UTC m=+176.967409313"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.190877 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-b8qs4" podStartSLOduration=149.190871805 podStartE2EDuration="2m29.190871805s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:59:00.542689753 +0000 UTC m=+176.320511407" watchObservedRunningTime="2026-01-30 11:59:01.190871805 +0000 UTC m=+176.968693459"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.208052 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:59:01 crc kubenswrapper[4703]: E0130 11:59:01.208751 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:01.708731593 +0000 UTC m=+177.486553247 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.214923 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.216659 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.226261 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.226749 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.266150 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.309213 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.309680 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/df97e9d9-5d6d-49e9-972d-84fbfb77cb2f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"df97e9d9-5d6d-49e9-972d-84fbfb77cb2f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.309738 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/df97e9d9-5d6d-49e9-972d-84fbfb77cb2f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"df97e9d9-5d6d-49e9-972d-84fbfb77cb2f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 30 11:59:01 crc kubenswrapper[4703]: E0130 11:59:01.309869 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:01.809855295 +0000 UTC m=+177.587676949 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.333085 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-xmjbx" podStartSLOduration=149.333067225 podStartE2EDuration="2m29.333067225s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:59:01.331903891 +0000 UTC m=+177.109725545" watchObservedRunningTime="2026-01-30 11:59:01.333067225 +0000 UTC m=+177.110888879"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.384431 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-qrt92" podStartSLOduration=149.384397243 podStartE2EDuration="2m29.384397243s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:59:01.382306813 +0000 UTC m=+177.160128467" watchObservedRunningTime="2026-01-30 11:59:01.384397243 +0000 UTC m=+177.162218897"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.410697 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/df97e9d9-5d6d-49e9-972d-84fbfb77cb2f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"df97e9d9-5d6d-49e9-972d-84fbfb77cb2f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.410809 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.410868 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/df97e9d9-5d6d-49e9-972d-84fbfb77cb2f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"df97e9d9-5d6d-49e9-972d-84fbfb77cb2f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.411290 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/df97e9d9-5d6d-49e9-972d-84fbfb77cb2f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"df97e9d9-5d6d-49e9-972d-84fbfb77cb2f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 30 11:59:01 crc kubenswrapper[4703]: E0130 11:59:01.411596 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:01.911581675 +0000 UTC m=+177.689403339 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.472869 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-w7r6v" podStartSLOduration=149.472852676 podStartE2EDuration="2m29.472852676s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:59:01.420514228 +0000 UTC m=+177.198335882" watchObservedRunningTime="2026-01-30 11:59:01.472852676 +0000 UTC m=+177.250674330"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.485061 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/df97e9d9-5d6d-49e9-972d-84fbfb77cb2f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"df97e9d9-5d6d-49e9-972d-84fbfb77cb2f\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.513643 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:59:01 crc kubenswrapper[4703]: E0130 11:59:01.514019 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:02.013995924 +0000 UTC m=+177.791817578 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.543309 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:01 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:59:01 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:01 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.543371 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.586106 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.615677 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:59:01 crc kubenswrapper[4703]: E0130 11:59:01.616009 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:02.115996941 +0000 UTC m=+177.893818595 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.718745 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:59:01 crc kubenswrapper[4703]: E0130 11:59:01.718993 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:02.218970787 +0000 UTC m=+177.996792471 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.819852 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:59:01 crc kubenswrapper[4703]: E0130 11:59:01.820462 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:02.320449949 +0000 UTC m=+178.098271603 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.848753 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-988gc"]
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.849662 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-988gc"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.863813 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.884454 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-988gc"]
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.932650 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:59:01 crc kubenswrapper[4703]: E0130 11:59:01.932776 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:02.432760249 +0000 UTC m=+178.210581903 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:01 crc kubenswrapper[4703]: I0130 11:59:01.933010 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:59:01 crc kubenswrapper[4703]: E0130 11:59:01.933389 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:02.433378737 +0000 UTC m=+178.211200391 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.034135 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.034403 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e4915d0-912f-426c-9d74-3d42e36678ed-utilities\") pod \"community-operators-988gc\" (UID: \"4e4915d0-912f-426c-9d74-3d42e36678ed\") " pod="openshift-marketplace/community-operators-988gc"
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.034428 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdwdv\" (UniqueName: \"kubernetes.io/projected/4e4915d0-912f-426c-9d74-3d42e36678ed-kube-api-access-cdwdv\") pod \"community-operators-988gc\" (UID: \"4e4915d0-912f-426c-9d74-3d42e36678ed\") " pod="openshift-marketplace/community-operators-988gc"
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.034508 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e4915d0-912f-426c-9d74-3d42e36678ed-catalog-content\") pod \"community-operators-988gc\" (UID: \"4e4915d0-912f-426c-9d74-3d42e36678ed\") " pod="openshift-marketplace/community-operators-988gc"
Jan 30 11:59:02 crc kubenswrapper[4703]: E0130 11:59:02.034639 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:02.534619022 +0000 UTC m=+178.312440676 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.059302 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-gkllf" event={"ID":"28191393-c4b1-4d80-9994-ca31868c9fb4","Type":"ContainerStarted","Data":"0c68ebe9f96eea093a9a5ae8eef98195b3a9ba9612c0739dcb3d98e6d6c5911f"}
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.070964 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" event={"ID":"de735d2c-8817-415e-b190-df96bf922407","Type":"ContainerStarted","Data":"596fe5cdf77c4a547c39fd21f637afbeceae7b92b6ec6807b3aa9f617bec42a3"}
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.071100 4703 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-d4tv6 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.6:6443/healthz\": dial tcp 10.217.0.6:6443: connect: connection refused" start-of-body=
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.071164 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" podUID="56158b04-1a02-453d-b48a-a107343a3955" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.6:6443/healthz\": dial tcp 10.217.0.6:6443: connect: connection refused"
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.071996 4703 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-wd7g9 container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body=
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.072041 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" podUID="26730f5b-96f6-40f6-ab66-0500a306f988" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused"
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.072647 4703 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-9lxbz container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:5443/healthz\": dial tcp 10.217.0.21:5443: connect: connection refused" start-of-body=
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.072689 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" podUID="8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.21:5443/healthz\": dial tcp 10.217.0.21:5443: connect: connection refused"
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.097094 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rsvj7"]
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.098091 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rsvj7"
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.109329 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.122219 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rsvj7"]
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.138181 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e4915d0-912f-426c-9d74-3d42e36678ed-catalog-content\") pod \"community-operators-988gc\" (UID: \"4e4915d0-912f-426c-9d74-3d42e36678ed\") " pod="openshift-marketplace/community-operators-988gc"
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.138252 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.138376 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11176772-9170-499f-8fec-e460709fd300-catalog-content\") pod \"certified-operators-rsvj7\" (UID: \"11176772-9170-499f-8fec-e460709fd300\") " pod="openshift-marketplace/certified-operators-rsvj7"
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.138477 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tnmw\" (UniqueName: \"kubernetes.io/projected/11176772-9170-499f-8fec-e460709fd300-kube-api-access-4tnmw\") pod \"certified-operators-rsvj7\" (UID: \"11176772-9170-499f-8fec-e460709fd300\") " pod="openshift-marketplace/certified-operators-rsvj7"
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.138605 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e4915d0-912f-426c-9d74-3d42e36678ed-utilities\") pod \"community-operators-988gc\" (UID: \"4e4915d0-912f-426c-9d74-3d42e36678ed\") " pod="openshift-marketplace/community-operators-988gc"
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.138634 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdwdv\" (UniqueName: \"kubernetes.io/projected/4e4915d0-912f-426c-9d74-3d42e36678ed-kube-api-access-cdwdv\") pod \"community-operators-988gc\" (UID: \"4e4915d0-912f-426c-9d74-3d42e36678ed\") " pod="openshift-marketplace/community-operators-988gc"
Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.138819 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName:
\"kubernetes.io/empty-dir/11176772-9170-499f-8fec-e460709fd300-utilities\") pod \"certified-operators-rsvj7\" (UID: \"11176772-9170-499f-8fec-e460709fd300\") " pod="openshift-marketplace/certified-operators-rsvj7" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.140686 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e4915d0-912f-426c-9d74-3d42e36678ed-catalog-content\") pod \"community-operators-988gc\" (UID: \"4e4915d0-912f-426c-9d74-3d42e36678ed\") " pod="openshift-marketplace/community-operators-988gc" Jan 30 11:59:02 crc kubenswrapper[4703]: E0130 11:59:02.140947 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:02.640935272 +0000 UTC m=+178.418756926 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.144948 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-gkllf" podStartSLOduration=150.144930325 podStartE2EDuration="2m30.144930325s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:59:02.144482512 +0000 UTC m=+177.922304166" watchObservedRunningTime="2026-01-30 11:59:02.144930325 +0000 UTC m=+177.922751989" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.159825 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e4915d0-912f-426c-9d74-3d42e36678ed-utilities\") pod \"community-operators-988gc\" (UID: \"4e4915d0-912f-426c-9d74-3d42e36678ed\") " pod="openshift-marketplace/community-operators-988gc" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.254789 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdwdv\" (UniqueName: \"kubernetes.io/projected/4e4915d0-912f-426c-9d74-3d42e36678ed-kube-api-access-cdwdv\") pod \"community-operators-988gc\" (UID: \"4e4915d0-912f-426c-9d74-3d42e36678ed\") " pod="openshift-marketplace/community-operators-988gc" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.283417 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:59:02 crc kubenswrapper[4703]: E0130 11:59:02.283649 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-30 11:59:02.783607615 +0000 UTC m=+178.561429269 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.283860 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11176772-9170-499f-8fec-e460709fd300-utilities\") pod \"certified-operators-rsvj7\" (UID: \"11176772-9170-499f-8fec-e460709fd300\") " pod="openshift-marketplace/certified-operators-rsvj7" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.283987 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.284023 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11176772-9170-499f-8fec-e460709fd300-catalog-content\") pod \"certified-operators-rsvj7\" (UID: \"11176772-9170-499f-8fec-e460709fd300\") " pod="openshift-marketplace/certified-operators-rsvj7" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.284173 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tnmw\" (UniqueName: \"kubernetes.io/projected/11176772-9170-499f-8fec-e460709fd300-kube-api-access-4tnmw\") pod \"certified-operators-rsvj7\" (UID: \"11176772-9170-499f-8fec-e460709fd300\") " pod="openshift-marketplace/certified-operators-rsvj7" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.284418 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11176772-9170-499f-8fec-e460709fd300-utilities\") pod \"certified-operators-rsvj7\" (UID: \"11176772-9170-499f-8fec-e460709fd300\") " pod="openshift-marketplace/certified-operators-rsvj7" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.284514 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11176772-9170-499f-8fec-e460709fd300-catalog-content\") pod \"certified-operators-rsvj7\" (UID: \"11176772-9170-499f-8fec-e460709fd300\") " pod="openshift-marketplace/certified-operators-rsvj7" Jan 30 11:59:02 crc kubenswrapper[4703]: E0130 11:59:02.284834 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:02.784813059 +0000 UTC m=+178.562634813 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.344638 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tnmw\" (UniqueName: \"kubernetes.io/projected/11176772-9170-499f-8fec-e460709fd300-kube-api-access-4tnmw\") pod \"certified-operators-rsvj7\" (UID: \"11176772-9170-499f-8fec-e460709fd300\") " pod="openshift-marketplace/certified-operators-rsvj7" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.373686 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8gf7w"] Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.374658 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8gf7w" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.464023 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.464243 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63-catalog-content\") pod \"community-operators-8gf7w\" (UID: \"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63\") " pod="openshift-marketplace/community-operators-8gf7w" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.464277 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63-utilities\") pod \"community-operators-8gf7w\" (UID: \"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63\") " pod="openshift-marketplace/community-operators-8gf7w" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.464315 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-546ch\" (UniqueName: \"kubernetes.io/projected/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63-kube-api-access-546ch\") pod \"community-operators-8gf7w\" (UID: \"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63\") " pod="openshift-marketplace/community-operators-8gf7w" Jan 30 11:59:02 crc kubenswrapper[4703]: E0130 11:59:02.464400 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:02.96438607 +0000 UTC m=+178.742207724 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.464505 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rsvj7" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.559978 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8gf7w"] Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.560233 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-988gc" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.570360 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 11:59:02 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld Jan 30 11:59:02 crc kubenswrapper[4703]: [+]process-running ok Jan 30 11:59:02 crc kubenswrapper[4703]: healthz check failed Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.570464 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.576476 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:59:02 crc kubenswrapper[4703]: E0130 11:59:02.578107 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:03.078074189 +0000 UTC m=+178.855895843 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.596441 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63-catalog-content\") pod \"community-operators-8gf7w\" (UID: \"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63\") " pod="openshift-marketplace/community-operators-8gf7w" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.596558 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63-utilities\") pod \"community-operators-8gf7w\" (UID: \"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63\") " pod="openshift-marketplace/community-operators-8gf7w" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.596665 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-546ch\" (UniqueName: \"kubernetes.io/projected/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63-kube-api-access-546ch\") pod \"community-operators-8gf7w\" (UID: \"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63\") " pod="openshift-marketplace/community-operators-8gf7w" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.597179 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63-catalog-content\") pod \"community-operators-8gf7w\" (UID: \"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63\") " pod="openshift-marketplace/community-operators-8gf7w" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.597464 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63-utilities\") pod \"community-operators-8gf7w\" (UID: \"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63\") " pod="openshift-marketplace/community-operators-8gf7w" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.621206 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-64ptg"] Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.622913 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-64ptg" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.693264 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-64ptg"] Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.698140 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-546ch\" (UniqueName: \"kubernetes.io/projected/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63-kube-api-access-546ch\") pod \"community-operators-8gf7w\" (UID: \"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63\") " pod="openshift-marketplace/community-operators-8gf7w" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.698648 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.699512 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wh47z\" (UniqueName: \"kubernetes.io/projected/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa-kube-api-access-wh47z\") pod \"certified-operators-64ptg\" (UID: \"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa\") " pod="openshift-marketplace/certified-operators-64ptg" Jan 30 11:59:02 crc kubenswrapper[4703]: E0130 11:59:02.713715 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:03.213681021 +0000 UTC m=+178.991502675 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.713786 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa-utilities\") pod \"certified-operators-64ptg\" (UID: \"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa\") " pod="openshift-marketplace/certified-operators-64ptg" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.713906 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa-catalog-content\") pod \"certified-operators-64ptg\" (UID: \"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa\") " pod="openshift-marketplace/certified-operators-64ptg" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.714011 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:59:02 crc kubenswrapper[4703]: E0130 11:59:02.714315 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:03.214308838 +0000 UTC m=+178.992130492 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.878232 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.879487 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa-catalog-content\") pod \"certified-operators-64ptg\" (UID: \"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa\") " pod="openshift-marketplace/certified-operators-64ptg" Jan 30 11:59:02 crc kubenswrapper[4703]: E0130 11:59:02.879616 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:03.379588343 +0000 UTC m=+179.157409997 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.879719 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.879759 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wh47z\" (UniqueName: \"kubernetes.io/projected/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa-kube-api-access-wh47z\") pod \"certified-operators-64ptg\" (UID: \"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa\") " pod="openshift-marketplace/certified-operators-64ptg" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.879814 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa-utilities\") pod \"certified-operators-64ptg\" (UID: \"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa\") " pod="openshift-marketplace/certified-operators-64ptg" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.880283 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa-catalog-content\") pod \"certified-operators-64ptg\" (UID: \"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa\") " pod="openshift-marketplace/certified-operators-64ptg" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.880359 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa-utilities\") pod \"certified-operators-64ptg\" (UID: \"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa\") " pod="openshift-marketplace/certified-operators-64ptg" Jan 30 11:59:02 crc kubenswrapper[4703]: E0130 11:59:02.880667 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:03.380656203 +0000 UTC m=+179.158477857 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.909181 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8gf7w" Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.981922 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:59:02 crc kubenswrapper[4703]: E0130 11:59:02.982466 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:03.482434275 +0000 UTC m=+179.260255949 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:59:02 crc kubenswrapper[4703]: I0130 11:59:02.982687 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:59:02 crc kubenswrapper[4703]: E0130 11:59:02.983243 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-30 11:59:03.483219407 +0000 UTC m=+179.261041061 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:59:03 crc kubenswrapper[4703]: I0130 11:59:03.095753 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:59:03 crc kubenswrapper[4703]: E0130 11:59:03.096724 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:03.59670715 +0000 UTC m=+179.374528804 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:59:03 crc kubenswrapper[4703]: I0130 11:59:03.210974 4703 patch_prober.go:28] interesting pod/apiserver-76f77b778f-gkllf container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.25:8443/livez\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body= Jan 30 11:59:03 crc kubenswrapper[4703]: I0130 11:59:03.211048 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-gkllf" podUID="28191393-c4b1-4d80-9994-ca31868c9fb4" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.25:8443/livez\": dial tcp 10.217.0.25:8443: connect: connection refused" Jan 30 11:59:03 crc kubenswrapper[4703]: I0130 11:59:03.224865 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:59:03 crc kubenswrapper[4703]: E0130 11:59:03.225252 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:03.725240361 +0000 UTC m=+179.503062015 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:59:03 crc kubenswrapper[4703]: I0130 11:59:03.456952 4703 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Jan 30 11:59:03 crc kubenswrapper[4703]: I0130 11:59:03.461038 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:59:03 crc kubenswrapper[4703]: E0130 11:59:03.461546 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:03.961523663 +0000 UTC m=+179.739345327 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:59:03 crc kubenswrapper[4703]: I0130 11:59:03.532934 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wh47z\" (UniqueName: \"kubernetes.io/projected/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa-kube-api-access-wh47z\") pod \"certified-operators-64ptg\" (UID: \"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa\") " pod="openshift-marketplace/certified-operators-64ptg" Jan 30 11:59:03 crc kubenswrapper[4703]: I0130 11:59:03.673012 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-64ptg" Jan 30 11:59:03 crc kubenswrapper[4703]: I0130 11:59:03.675331 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:59:03 crc kubenswrapper[4703]: E0130 11:59:03.676107 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:04.176086818 +0000 UTC m=+179.953908472 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:59:03 crc kubenswrapper[4703]: I0130 11:59:03.683557 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 11:59:03 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld Jan 30 11:59:03 crc kubenswrapper[4703]: [+]process-running ok Jan 30 11:59:03 crc kubenswrapper[4703]: healthz check failed Jan 30 11:59:03 crc kubenswrapper[4703]: I0130 11:59:03.683622 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 11:59:03 crc kubenswrapper[4703]: I0130 11:59:03.706731 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" event={"ID":"de735d2c-8817-415e-b190-df96bf922407","Type":"ContainerStarted","Data":"dd9bd23f1156931d5d9d1a3734a7aeaa1fc12da79fb2950bfff4aa679e8b7d15"} Jan 30 11:59:03 crc kubenswrapper[4703]: I0130 11:59:03.706775 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:59:03 crc kubenswrapper[4703]: I0130 11:59:03.706793 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-gkllf" Jan 30 11:59:03 crc kubenswrapper[4703]: I0130 11:59:03.706827 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-zlb62" Jan 30 11:59:03 crc kubenswrapper[4703]: I0130 11:59:03.860868 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:59:03 crc kubenswrapper[4703]: E0130 11:59:03.862037 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:04.362021469 +0000 UTC m=+180.139843123 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:59:03 crc kubenswrapper[4703]: I0130 11:59:03.883733 4703 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-30T11:59:03.456987015Z","Handler":null,"Name":""} Jan 30 11:59:03 crc kubenswrapper[4703]: I0130 11:59:03.975568 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:59:03 crc kubenswrapper[4703]: E0130 11:59:03.976105 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-30 11:59:04.47609082 +0000 UTC m=+180.253912474 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-2r4b9" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.079468 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:59:04 crc kubenswrapper[4703]: E0130 11:59:04.079782 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-30 11:59:04.579766745 +0000 UTC m=+180.357588399 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.081022 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.125589 4703 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.125626 4703 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.180883 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.277526 4703 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.277585 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.432491 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-m9qpx"] Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.433450 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m9qpx" Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.545243 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f49515e4-f9bb-4741-a979-5d59fbc7198d-utilities\") pod \"redhat-marketplace-m9qpx\" (UID: \"f49515e4-f9bb-4741-a979-5d59fbc7198d\") " pod="openshift-marketplace/redhat-marketplace-m9qpx" Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.545292 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnjdm\" (UniqueName: \"kubernetes.io/projected/f49515e4-f9bb-4741-a979-5d59fbc7198d-kube-api-access-xnjdm\") pod \"redhat-marketplace-m9qpx\" (UID: \"f49515e4-f9bb-4741-a979-5d59fbc7198d\") " pod="openshift-marketplace/redhat-marketplace-m9qpx" Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.545383 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f49515e4-f9bb-4741-a979-5d59fbc7198d-catalog-content\") pod \"redhat-marketplace-m9qpx\" (UID: \"f49515e4-f9bb-4741-a979-5d59fbc7198d\") " pod="openshift-marketplace/redhat-marketplace-m9qpx" Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.572759 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.646414 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"df97e9d9-5d6d-49e9-972d-84fbfb77cb2f","Type":"ContainerStarted","Data":"34a38738556a737e1250017564e49562030f2c94fdfbfd737f3832faf0cd63b2"} Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.649523 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f49515e4-f9bb-4741-a979-5d59fbc7198d-utilities\") pod \"redhat-marketplace-m9qpx\" (UID: \"f49515e4-f9bb-4741-a979-5d59fbc7198d\") " pod="openshift-marketplace/redhat-marketplace-m9qpx" Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.649616 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnjdm\" (UniqueName: \"kubernetes.io/projected/f49515e4-f9bb-4741-a979-5d59fbc7198d-kube-api-access-xnjdm\") pod \"redhat-marketplace-m9qpx\" (UID: \"f49515e4-f9bb-4741-a979-5d59fbc7198d\") " pod="openshift-marketplace/redhat-marketplace-m9qpx" Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.649852 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f49515e4-f9bb-4741-a979-5d59fbc7198d-catalog-content\") pod \"redhat-marketplace-m9qpx\" (UID: \"f49515e4-f9bb-4741-a979-5d59fbc7198d\") " pod="openshift-marketplace/redhat-marketplace-m9qpx" Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.650546 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f49515e4-f9bb-4741-a979-5d59fbc7198d-catalog-content\") pod \"redhat-marketplace-m9qpx\" (UID: \"f49515e4-f9bb-4741-a979-5d59fbc7198d\") " pod="openshift-marketplace/redhat-marketplace-m9qpx" Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.651594 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f49515e4-f9bb-4741-a979-5d59fbc7198d-utilities\") pod \"redhat-marketplace-m9qpx\" (UID: \"f49515e4-f9bb-4741-a979-5d59fbc7198d\") " pod="openshift-marketplace/redhat-marketplace-m9qpx" Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.654811 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 11:59:04 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld Jan 30 11:59:04 crc kubenswrapper[4703]: [+]process-running ok Jan 30 11:59:04 crc kubenswrapper[4703]: healthz check failed Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.654880 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.689927 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-2r4b9\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.799189 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 30 11:59:04 crc kubenswrapper[4703]: I0130 11:59:04.850855 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.015067 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnjdm\" (UniqueName: \"kubernetes.io/projected/f49515e4-f9bb-4741-a979-5d59fbc7198d-kube-api-access-xnjdm\") pod \"redhat-marketplace-m9qpx\" (UID: \"f49515e4-f9bb-4741-a979-5d59fbc7198d\") " pod="openshift-marketplace/redhat-marketplace-m9qpx" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.376237 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vztsq"] Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.438486 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.557586 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.557843 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m9qpx" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.655425 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-m9qpx"] Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.658368 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vztsq" Jan 30 11:59:05 crc kubenswrapper[4703]: E0130 11:59:05.665780 4703 reconciler_common.go:156] "operationExecutor.UnmountVolume failed (controllerAttachDetachEnabled true) for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") : UnmountVolume.NewUnmounter failed for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") : kubernetes.io/csi: unmounter failed to load volume data file [/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~csi/pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8/mount]: kubernetes.io/csi: failed to open volume data file [/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~csi/pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8/vol_data.json]: open /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~csi/pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8/vol_data.json: no such file or directory" err="UnmountVolume.NewUnmounter failed for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") : kubernetes.io/csi: unmounter failed to load volume data file [/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~csi/pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8/mount]: kubernetes.io/csi: failed to open volume data file [/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~csi/pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8/vol_data.json]: open /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~csi/pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8/vol_data.json: no such file or directory" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.686662 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 11:59:05 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld Jan 30 11:59:05 crc kubenswrapper[4703]: [+]process-running ok Jan 30 11:59:05 crc kubenswrapper[4703]: healthz check failed Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.686706 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.767483 4703 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdfc1d61-7a35-49e6-a091-83226033a80f-utilities\") pod \"redhat-marketplace-vztsq\" (UID: \"bdfc1d61-7a35-49e6-a091-83226033a80f\") " pod="openshift-marketplace/redhat-marketplace-vztsq" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.767539 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdfc1d61-7a35-49e6-a091-83226033a80f-catalog-content\") pod \"redhat-marketplace-vztsq\" (UID: \"bdfc1d61-7a35-49e6-a091-83226033a80f\") " pod="openshift-marketplace/redhat-marketplace-vztsq" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.767580 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tq7t\" (UniqueName: \"kubernetes.io/projected/bdfc1d61-7a35-49e6-a091-83226033a80f-kube-api-access-6tq7t\") pod \"redhat-marketplace-vztsq\" (UID: \"bdfc1d61-7a35-49e6-a091-83226033a80f\") " pod="openshift-marketplace/redhat-marketplace-vztsq" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.861969 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.862546 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" event={"ID":"de735d2c-8817-415e-b190-df96bf922407","Type":"ContainerStarted","Data":"f7ce358272ba7074fcc1cd88c2c0d3ef25b93a8b2f8f640382e6d8eaebfcf625"} Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.862570 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-988gc" event={"ID":"4e4915d0-912f-426c-9d74-3d42e36678ed","Type":"ContainerStarted","Data":"1822eba416a2314ed01f7393600216cfef79070903e948de31cac84a88df5bc8"} Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.862583 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vztsq"] Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.862642 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rsvj7" event={"ID":"11176772-9170-499f-8fec-e460709fd300","Type":"ContainerStarted","Data":"ff8a7b163e69eb61ab5953daa2df4630cd0f23723ab56084e54382f8639e0f0f"} Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.862659 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ngspg"] Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.864548 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ngspg" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.866395 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.868896 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdfc1d61-7a35-49e6-a091-83226033a80f-utilities\") pod \"redhat-marketplace-vztsq\" (UID: \"bdfc1d61-7a35-49e6-a091-83226033a80f\") " pod="openshift-marketplace/redhat-marketplace-vztsq" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.868974 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdfc1d61-7a35-49e6-a091-83226033a80f-catalog-content\") pod \"redhat-marketplace-vztsq\" (UID: \"bdfc1d61-7a35-49e6-a091-83226033a80f\") " pod="openshift-marketplace/redhat-marketplace-vztsq" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.869028 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tq7t\" (UniqueName: \"kubernetes.io/projected/bdfc1d61-7a35-49e6-a091-83226033a80f-kube-api-access-6tq7t\") pod \"redhat-marketplace-vztsq\" (UID: \"bdfc1d61-7a35-49e6-a091-83226033a80f\") " pod="openshift-marketplace/redhat-marketplace-vztsq" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.869151 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436-utilities\") pod \"redhat-operators-ngspg\" (UID: \"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436\") " pod="openshift-marketplace/redhat-operators-ngspg" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.869199 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436-catalog-content\") pod \"redhat-operators-ngspg\" (UID: \"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436\") " pod="openshift-marketplace/redhat-operators-ngspg" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.869236 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gvnv\" (UniqueName: \"kubernetes.io/projected/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436-kube-api-access-8gvnv\") pod \"redhat-operators-ngspg\" (UID: \"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436\") " pod="openshift-marketplace/redhat-operators-ngspg" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.869878 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdfc1d61-7a35-49e6-a091-83226033a80f-utilities\") pod \"redhat-marketplace-vztsq\" (UID: \"bdfc1d61-7a35-49e6-a091-83226033a80f\") " pod="openshift-marketplace/redhat-marketplace-vztsq" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.870160 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdfc1d61-7a35-49e6-a091-83226033a80f-catalog-content\") pod \"redhat-marketplace-vztsq\" (UID: \"bdfc1d61-7a35-49e6-a091-83226033a80f\") " pod="openshift-marketplace/redhat-marketplace-vztsq" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.878306 4703 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-marketplace/certified-operators-rsvj7"] Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.922369 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ngspg"] Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.949463 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tq7t\" (UniqueName: \"kubernetes.io/projected/bdfc1d61-7a35-49e6-a091-83226033a80f-kube-api-access-6tq7t\") pod \"redhat-marketplace-vztsq\" (UID: \"bdfc1d61-7a35-49e6-a091-83226033a80f\") " pod="openshift-marketplace/redhat-marketplace-vztsq" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.966375 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xzz7n"] Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.967748 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xzz7n" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.987460 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gvnv\" (UniqueName: \"kubernetes.io/projected/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436-kube-api-access-8gvnv\") pod \"redhat-operators-ngspg\" (UID: \"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436\") " pod="openshift-marketplace/redhat-operators-ngspg" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.987647 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61693ed0-e352-4c89-9076-be1acb1a0bfe-catalog-content\") pod \"redhat-operators-xzz7n\" (UID: \"61693ed0-e352-4c89-9076-be1acb1a0bfe\") " pod="openshift-marketplace/redhat-operators-xzz7n" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.987805 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61693ed0-e352-4c89-9076-be1acb1a0bfe-utilities\") pod \"redhat-operators-xzz7n\" (UID: \"61693ed0-e352-4c89-9076-be1acb1a0bfe\") " pod="openshift-marketplace/redhat-operators-xzz7n" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.988055 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436-utilities\") pod \"redhat-operators-ngspg\" (UID: \"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436\") " pod="openshift-marketplace/redhat-operators-ngspg" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.988226 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436-catalog-content\") pod \"redhat-operators-ngspg\" (UID: \"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436\") " pod="openshift-marketplace/redhat-operators-ngspg" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.988341 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p75v5\" (UniqueName: \"kubernetes.io/projected/61693ed0-e352-4c89-9076-be1acb1a0bfe-kube-api-access-p75v5\") pod \"redhat-operators-xzz7n\" (UID: \"61693ed0-e352-4c89-9076-be1acb1a0bfe\") " pod="openshift-marketplace/redhat-operators-xzz7n" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.989592 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436-utilities\") pod \"redhat-operators-ngspg\" (UID: \"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436\") " pod="openshift-marketplace/redhat-operators-ngspg" Jan 30 11:59:05 crc kubenswrapper[4703]: I0130 11:59:05.989959 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436-catalog-content\") pod \"redhat-operators-ngspg\" (UID: \"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436\") " pod="openshift-marketplace/redhat-operators-ngspg" Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.015946 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xzz7n"] Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.017152 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-988gc"] Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.038551 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-wsjdp" podStartSLOduration=21.038534794 podStartE2EDuration="21.038534794s" podCreationTimestamp="2026-01-30 11:58:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:59:05.993801263 +0000 UTC m=+181.771622937" watchObservedRunningTime="2026-01-30 11:59:06.038534794 +0000 UTC m=+181.816356448" Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.054930 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gvnv\" (UniqueName: \"kubernetes.io/projected/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436-kube-api-access-8gvnv\") pod \"redhat-operators-ngspg\" (UID: \"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436\") " pod="openshift-marketplace/redhat-operators-ngspg" Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.068427 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vztsq" Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.074780 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ngspg" Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.225692 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p75v5\" (UniqueName: \"kubernetes.io/projected/61693ed0-e352-4c89-9076-be1acb1a0bfe-kube-api-access-p75v5\") pod \"redhat-operators-xzz7n\" (UID: \"61693ed0-e352-4c89-9076-be1acb1a0bfe\") " pod="openshift-marketplace/redhat-operators-xzz7n" Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.225801 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61693ed0-e352-4c89-9076-be1acb1a0bfe-catalog-content\") pod \"redhat-operators-xzz7n\" (UID: \"61693ed0-e352-4c89-9076-be1acb1a0bfe\") " pod="openshift-marketplace/redhat-operators-xzz7n" Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.225867 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61693ed0-e352-4c89-9076-be1acb1a0bfe-utilities\") pod \"redhat-operators-xzz7n\" (UID: \"61693ed0-e352-4c89-9076-be1acb1a0bfe\") " pod="openshift-marketplace/redhat-operators-xzz7n" Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.244578 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61693ed0-e352-4c89-9076-be1acb1a0bfe-catalog-content\") pod \"redhat-operators-xzz7n\" (UID: \"61693ed0-e352-4c89-9076-be1acb1a0bfe\") " pod="openshift-marketplace/redhat-operators-xzz7n" Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.244682 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-64ptg"] Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.301549 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p75v5\" (UniqueName: \"kubernetes.io/projected/61693ed0-e352-4c89-9076-be1acb1a0bfe-kube-api-access-p75v5\") pod \"redhat-operators-xzz7n\" (UID: \"61693ed0-e352-4c89-9076-be1acb1a0bfe\") " pod="openshift-marketplace/redhat-operators-xzz7n" Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.439648 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8gf7w"] Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.528536 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61693ed0-e352-4c89-9076-be1acb1a0bfe-utilities\") pod \"redhat-operators-xzz7n\" (UID: \"61693ed0-e352-4c89-9076-be1acb1a0bfe\") " pod="openshift-marketplace/redhat-operators-xzz7n" Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.698013 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xzz7n" Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.715135 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 11:59:06 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld Jan 30 11:59:06 crc kubenswrapper[4703]: [+]process-running ok Jan 30 11:59:06 crc kubenswrapper[4703]: healthz check failed Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.715202 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.898244 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8gf7w" event={"ID":"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63","Type":"ContainerStarted","Data":"736f7b46577942901a1dd69fdfca4905edae1e0037b59192de727ae55b98b7f0"} Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.903309 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-988gc" event={"ID":"4e4915d0-912f-426c-9d74-3d42e36678ed","Type":"ContainerStarted","Data":"971c4ad73139720930d3ef27b04952062e9b9b1621fc36d165f152d0f949cf83"} Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.906943 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rsvj7" event={"ID":"11176772-9170-499f-8fec-e460709fd300","Type":"ContainerStarted","Data":"f293446aff3a8701046f8d6de7eb394971e96b034800138e9c74d7b165f5bf88"} Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.909579 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64ptg" event={"ID":"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa","Type":"ContainerStarted","Data":"4e7c0dcab3fa46b2360edcd0c722ae4ccdfcfbd94b57af3ae49051809da2e596"} Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.912523 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"df97e9d9-5d6d-49e9-972d-84fbfb77cb2f","Type":"ContainerStarted","Data":"e5f87dba1b6d66c5b609aa3f990d2a87f1df9e446a68966c94d826e3b997a581"} Jan 30 11:59:06 crc kubenswrapper[4703]: I0130 11:59:06.961165 4703 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 30 11:59:06 crc kubenswrapper[4703]: E0130 11:59:06.982705 4703 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod11176772_9170_499f_8fec_e460709fd300.slice/crio-f293446aff3a8701046f8d6de7eb394971e96b034800138e9c74d7b165f5bf88.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod11176772_9170_499f_8fec_e460709fd300.slice/crio-conmon-f293446aff3a8701046f8d6de7eb394971e96b034800138e9c74d7b165f5bf88.scope\": RecentStats: unable to find data in memory cache]" Jan 30 11:59:07 crc kubenswrapper[4703]: I0130 11:59:07.293087 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" 
podStartSLOduration=6.293062369 podStartE2EDuration="6.293062369s" podCreationTimestamp="2026-01-30 11:59:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:59:07.263374744 +0000 UTC m=+183.041196398" watchObservedRunningTime="2026-01-30 11:59:07.293062369 +0000 UTC m=+183.070884023" Jan 30 11:59:07 crc kubenswrapper[4703]: I0130 11:59:07.367940 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 11:59:07 crc kubenswrapper[4703]: I0130 11:59:07.368004 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 11:59:07 crc kubenswrapper[4703]: I0130 11:59:07.368331 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2r4b9"] Jan 30 11:59:07 crc kubenswrapper[4703]: I0130 11:59:07.368578 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 11:59:07 crc kubenswrapper[4703]: I0130 11:59:07.368615 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 11:59:07 crc kubenswrapper[4703]: I0130 11:59:07.413645 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-m9qpx"] Jan 30 11:59:07 crc kubenswrapper[4703]: I0130 11:59:07.582732 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-zwbps" Jan 30 11:59:07 crc kubenswrapper[4703]: I0130 11:59:07.594251 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 11:59:07 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld Jan 30 11:59:07 crc kubenswrapper[4703]: [+]process-running ok Jan 30 11:59:07 crc kubenswrapper[4703]: healthz check failed Jan 30 11:59:07 crc kubenswrapper[4703]: I0130 11:59:07.594320 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 11:59:07 crc kubenswrapper[4703]: I0130 11:59:07.940581 4703 generic.go:334] "Generic (PLEG): container finished" podID="11176772-9170-499f-8fec-e460709fd300" containerID="f293446aff3a8701046f8d6de7eb394971e96b034800138e9c74d7b165f5bf88" exitCode=0 Jan 30 11:59:07 crc kubenswrapper[4703]: I0130 11:59:07.940662 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-rsvj7" event={"ID":"11176772-9170-499f-8fec-e460709fd300","Type":"ContainerDied","Data":"f293446aff3a8701046f8d6de7eb394971e96b034800138e9c74d7b165f5bf88"} Jan 30 11:59:07 crc kubenswrapper[4703]: I0130 11:59:07.941563 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m9qpx" event={"ID":"f49515e4-f9bb-4741-a979-5d59fbc7198d","Type":"ContainerStarted","Data":"af4e235e0a135e9a9db7394ee3f358f8e2b4b67ee4943edd9c52e77e90423d5e"} Jan 30 11:59:07 crc kubenswrapper[4703]: I0130 11:59:07.943072 4703 generic.go:334] "Generic (PLEG): container finished" podID="2eaf3e49-2394-495c-926e-5504ff81ccc5" containerID="854d15c00e6754d489a505fd2e9138494328c64efebf46f08e1c4a3040ebbed5" exitCode=0 Jan 30 11:59:07 crc kubenswrapper[4703]: I0130 11:59:07.943112 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786" event={"ID":"2eaf3e49-2394-495c-926e-5504ff81ccc5","Type":"ContainerDied","Data":"854d15c00e6754d489a505fd2e9138494328c64efebf46f08e1c4a3040ebbed5"} Jan 30 11:59:07 crc kubenswrapper[4703]: I0130 11:59:07.944319 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" event={"ID":"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5","Type":"ContainerStarted","Data":"7824f593c6b33e0ceb239b77b697112c82d4c55fb7d0a97ef13b7eac6eab3230"} Jan 30 11:59:07 crc kubenswrapper[4703]: I0130 11:59:07.945707 4703 generic.go:334] "Generic (PLEG): container finished" podID="4e4915d0-912f-426c-9d74-3d42e36678ed" containerID="971c4ad73139720930d3ef27b04952062e9b9b1621fc36d165f152d0f949cf83" exitCode=0 Jan 30 11:59:07 crc kubenswrapper[4703]: I0130 11:59:07.946192 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-988gc" event={"ID":"4e4915d0-912f-426c-9d74-3d42e36678ed","Type":"ContainerDied","Data":"971c4ad73139720930d3ef27b04952062e9b9b1621fc36d165f152d0f949cf83"} Jan 30 11:59:08 crc kubenswrapper[4703]: I0130 11:59:08.259650 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-l4tvp" Jan 30 11:59:08 crc kubenswrapper[4703]: I0130 11:59:08.342958 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:59:08 crc kubenswrapper[4703]: I0130 11:59:08.343000 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-mxjx6" Jan 30 11:59:08 crc kubenswrapper[4703]: I0130 11:59:08.346397 4703 patch_prober.go:28] interesting pod/console-f9d7485db-mxjx6 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.19:8443/health\": dial tcp 10.217.0.19:8443: connect: connection refused" start-of-body= Jan 30 11:59:08 crc kubenswrapper[4703]: I0130 11:59:08.346444 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-mxjx6" podUID="aa4aaaa5-396e-4e62-92a3-74b835af58a7" containerName="console" probeResult="failure" output="Get \"https://10.217.0.19:8443/health\": dial tcp 10.217.0.19:8443: connect: connection refused" Jan 30 11:59:08 crc kubenswrapper[4703]: I0130 11:59:08.391833 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 11:59:08 crc kubenswrapper[4703]: I0130 
11:59:08.424023 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vztsq"] Jan 30 11:59:08 crc kubenswrapper[4703]: I0130 11:59:08.595246 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 11:59:08 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld Jan 30 11:59:08 crc kubenswrapper[4703]: [+]process-running ok Jan 30 11:59:08 crc kubenswrapper[4703]: healthz check failed Jan 30 11:59:08 crc kubenswrapper[4703]: I0130 11:59:08.595610 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 11:59:08 crc kubenswrapper[4703]: I0130 11:59:08.602324 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ngspg"] Jan 30 11:59:08 crc kubenswrapper[4703]: I0130 11:59:08.608868 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" Jan 30 11:59:08 crc kubenswrapper[4703]: I0130 11:59:08.612331 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.047216 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xzz7n"] Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.119933 4703 generic.go:334] "Generic (PLEG): container finished" podID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" containerID="e45e9f34f0a242323f995d20efb819515d38699870860e3d0cd21a497215d424" exitCode=0 Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.120012 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64ptg" event={"ID":"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa","Type":"ContainerDied","Data":"e45e9f34f0a242323f995d20efb819515d38699870860e3d0cd21a497215d424"} Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.125602 4703 generic.go:334] "Generic (PLEG): container finished" podID="f49515e4-f9bb-4741-a979-5d59fbc7198d" containerID="47a32128d7e654c4f7187e443fdc88a0be97522a72fc7542b62b4ab4f34f248a" exitCode=0 Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.125658 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m9qpx" event={"ID":"f49515e4-f9bb-4741-a979-5d59fbc7198d","Type":"ContainerDied","Data":"47a32128d7e654c4f7187e443fdc88a0be97522a72fc7542b62b4ab4f34f248a"} Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.175525 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" event={"ID":"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5","Type":"ContainerStarted","Data":"552ddaca5d3e9e9e211dfe06442acc8fbc47dd28d0a6e1fbbfb219564697658f"} Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.176490 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.328482 4703 generic.go:334] "Generic (PLEG): container finished" podID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" 
containerID="7066c37d6bbb70531ff839e9219241b538003a23fe4dad465901a92ab32b09b0" exitCode=0 Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.330373 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8gf7w" event={"ID":"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63","Type":"ContainerDied","Data":"7066c37d6bbb70531ff839e9219241b538003a23fe4dad465901a92ab32b09b0"} Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.358637 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.372398 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.373790 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" podStartSLOduration=157.373771261 podStartE2EDuration="2m37.373771261s" podCreationTimestamp="2026-01-30 11:56:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:59:09.366799643 +0000 UTC m=+185.144621297" watchObservedRunningTime="2026-01-30 11:59:09.373771261 +0000 UTC m=+185.151592915" Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.385880 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.386158 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.391543 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.551961 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c868f7b8-6ab3-45a4-8057-042c3f583849-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"c868f7b8-6ab3-45a4-8057-042c3f583849\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.552162 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c868f7b8-6ab3-45a4-8057-042c3f583849-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"c868f7b8-6ab3-45a4-8057-042c3f583849\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.564712 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 11:59:09 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld Jan 30 11:59:09 crc kubenswrapper[4703]: [+]process-running ok Jan 30 11:59:09 crc kubenswrapper[4703]: healthz check failed Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.564793 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 
11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.654185 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c868f7b8-6ab3-45a4-8057-042c3f583849-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"c868f7b8-6ab3-45a4-8057-042c3f583849\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.654394 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c868f7b8-6ab3-45a4-8057-042c3f583849-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"c868f7b8-6ab3-45a4-8057-042c3f583849\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.655234 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c868f7b8-6ab3-45a4-8057-042c3f583849-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"c868f7b8-6ab3-45a4-8057-042c3f583849\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 30 11:59:09 crc kubenswrapper[4703]: I0130 11:59:09.794476 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c868f7b8-6ab3-45a4-8057-042c3f583849-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"c868f7b8-6ab3-45a4-8057-042c3f583849\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 30 11:59:10 crc kubenswrapper[4703]: I0130 11:59:10.038100 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 30 11:59:10 crc kubenswrapper[4703]: I0130 11:59:10.449133 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vztsq" event={"ID":"bdfc1d61-7a35-49e6-a091-83226033a80f","Type":"ContainerStarted","Data":"06770b6c54c6f7b88cdab254bb1d7993cd49aa4342ae404c92c1e56275a6e2cc"} Jan 30 11:59:10 crc kubenswrapper[4703]: I0130 11:59:10.451318 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ngspg" event={"ID":"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436","Type":"ContainerStarted","Data":"4df821b456f302cb841bf3aefde2accd9138542c6b397085a6eea587f8ae5f3f"} Jan 30 11:59:10 crc kubenswrapper[4703]: I0130 11:59:10.453713 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xzz7n" event={"ID":"61693ed0-e352-4c89-9076-be1acb1a0bfe","Type":"ContainerStarted","Data":"f5f689484cfb75969390d075c7b5bb0ef3d1e662582c9b78ecbd77839999d1be"} Jan 30 11:59:10 crc kubenswrapper[4703]: I0130 11:59:10.569021 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 11:59:10 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld Jan 30 11:59:10 crc kubenswrapper[4703]: [+]process-running ok Jan 30 11:59:10 crc kubenswrapper[4703]: healthz check failed Jan 30 11:59:10 crc kubenswrapper[4703]: I0130 11:59:10.569441 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 11:59:11 crc 
kubenswrapper[4703]: I0130 11:59:11.559452 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 11:59:11 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld Jan 30 11:59:11 crc kubenswrapper[4703]: [+]process-running ok Jan 30 11:59:11 crc kubenswrapper[4703]: healthz check failed Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.559824 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.711275 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786" event={"ID":"2eaf3e49-2394-495c-926e-5504ff81ccc5","Type":"ContainerDied","Data":"c17e92e00793f9efbbac5acb7f9dc233c3a17a291e5259edbee12560790d2c4d"} Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.711320 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c17e92e00793f9efbbac5acb7f9dc233c3a17a291e5259edbee12560790d2c4d" Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.730853 4703 generic.go:334] "Generic (PLEG): container finished" podID="bdfc1d61-7a35-49e6-a091-83226033a80f" containerID="cfb233911ad45347c598184bb4d5d424fe43e22808d6a2f2b0af71deea67b738" exitCode=0 Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.730991 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vztsq" event={"ID":"bdfc1d61-7a35-49e6-a091-83226033a80f","Type":"ContainerDied","Data":"cfb233911ad45347c598184bb4d5d424fe43e22808d6a2f2b0af71deea67b738"} Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.741132 4703 generic.go:334] "Generic (PLEG): container finished" podID="df97e9d9-5d6d-49e9-972d-84fbfb77cb2f" containerID="e5f87dba1b6d66c5b609aa3f990d2a87f1df9e446a68966c94d826e3b997a581" exitCode=0 Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.741272 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"df97e9d9-5d6d-49e9-972d-84fbfb77cb2f","Type":"ContainerDied","Data":"e5f87dba1b6d66c5b609aa3f990d2a87f1df9e446a68966c94d826e3b997a581"} Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.746917 4703 generic.go:334] "Generic (PLEG): container finished" podID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" containerID="d65257231dc110632856b5d8f3ce3f8e47f9990f70427eac3f5a518841d8931e" exitCode=0 Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.746985 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ngspg" event={"ID":"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436","Type":"ContainerDied","Data":"d65257231dc110632856b5d8f3ce3f8e47f9990f70427eac3f5a518841d8931e"} Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.764352 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786" Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.764554 4703 generic.go:334] "Generic (PLEG): container finished" podID="61693ed0-e352-4c89-9076-be1acb1a0bfe" containerID="818e0bdb0de312f7e96185a7d002cbcdf59cecc454140511cb6e3890d0b8ea75" exitCode=0 Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.765795 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xzz7n" event={"ID":"61693ed0-e352-4c89-9076-be1acb1a0bfe","Type":"ContainerDied","Data":"818e0bdb0de312f7e96185a7d002cbcdf59cecc454140511cb6e3890d0b8ea75"} Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.825844 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92rr9\" (UniqueName: \"kubernetes.io/projected/2eaf3e49-2394-495c-926e-5504ff81ccc5-kube-api-access-92rr9\") pod \"2eaf3e49-2394-495c-926e-5504ff81ccc5\" (UID: \"2eaf3e49-2394-495c-926e-5504ff81ccc5\") " Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.825911 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2eaf3e49-2394-495c-926e-5504ff81ccc5-config-volume\") pod \"2eaf3e49-2394-495c-926e-5504ff81ccc5\" (UID: \"2eaf3e49-2394-495c-926e-5504ff81ccc5\") " Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.826022 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2eaf3e49-2394-495c-926e-5504ff81ccc5-secret-volume\") pod \"2eaf3e49-2394-495c-926e-5504ff81ccc5\" (UID: \"2eaf3e49-2394-495c-926e-5504ff81ccc5\") " Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.830737 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2eaf3e49-2394-495c-926e-5504ff81ccc5-config-volume" (OuterVolumeSpecName: "config-volume") pod "2eaf3e49-2394-495c-926e-5504ff81ccc5" (UID: "2eaf3e49-2394-495c-926e-5504ff81ccc5"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.848576 4703 patch_prober.go:28] interesting pod/apiserver-76f77b778f-gkllf container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 30 11:59:11 crc kubenswrapper[4703]: [+]log ok Jan 30 11:59:11 crc kubenswrapper[4703]: [+]etcd ok Jan 30 11:59:11 crc kubenswrapper[4703]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 30 11:59:11 crc kubenswrapper[4703]: [+]poststarthook/generic-apiserver-start-informers ok Jan 30 11:59:11 crc kubenswrapper[4703]: [+]poststarthook/max-in-flight-filter ok Jan 30 11:59:11 crc kubenswrapper[4703]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 30 11:59:11 crc kubenswrapper[4703]: [+]poststarthook/image.openshift.io-apiserver-caches ok Jan 30 11:59:11 crc kubenswrapper[4703]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Jan 30 11:59:11 crc kubenswrapper[4703]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Jan 30 11:59:11 crc kubenswrapper[4703]: [+]poststarthook/project.openshift.io-projectcache ok Jan 30 11:59:11 crc kubenswrapper[4703]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Jan 30 11:59:11 crc kubenswrapper[4703]: [-]poststarthook/openshift.io-startinformers failed: reason withheld Jan 30 11:59:11 crc kubenswrapper[4703]: [+]poststarthook/openshift.io-restmapperupdater ok Jan 30 11:59:11 crc kubenswrapper[4703]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Jan 30 11:59:11 crc kubenswrapper[4703]: livez check failed Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.848626 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-gkllf" podUID="28191393-c4b1-4d80-9994-ca31868c9fb4" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.853853 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2eaf3e49-2394-495c-926e-5504ff81ccc5-kube-api-access-92rr9" (OuterVolumeSpecName: "kube-api-access-92rr9") pod "2eaf3e49-2394-495c-926e-5504ff81ccc5" (UID: "2eaf3e49-2394-495c-926e-5504ff81ccc5"). InnerVolumeSpecName "kube-api-access-92rr9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.927402 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92rr9\" (UniqueName: \"kubernetes.io/projected/2eaf3e49-2394-495c-926e-5504ff81ccc5-kube-api-access-92rr9\") on node \"crc\" DevicePath \"\"" Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.927444 4703 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2eaf3e49-2394-495c-926e-5504ff81ccc5-config-volume\") on node \"crc\" DevicePath \"\"" Jan 30 11:59:11 crc kubenswrapper[4703]: I0130 11:59:11.973210 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2eaf3e49-2394-495c-926e-5504ff81ccc5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2eaf3e49-2394-495c-926e-5504ff81ccc5" (UID: "2eaf3e49-2394-495c-926e-5504ff81ccc5"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 11:59:12 crc kubenswrapper[4703]: I0130 11:59:12.003085 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 30 11:59:12 crc kubenswrapper[4703]: I0130 11:59:12.031870 4703 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2eaf3e49-2394-495c-926e-5504ff81ccc5-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 30 11:59:12 crc kubenswrapper[4703]: I0130 11:59:12.582763 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 11:59:12 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld Jan 30 11:59:12 crc kubenswrapper[4703]: [+]process-running ok Jan 30 11:59:12 crc kubenswrapper[4703]: healthz check failed Jan 30 11:59:12 crc kubenswrapper[4703]: I0130 11:59:12.582812 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 11:59:12 crc kubenswrapper[4703]: I0130 11:59:12.881376 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 11:59:12 crc kubenswrapper[4703]: I0130 11:59:12.881422 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 11:59:12 crc kubenswrapper[4703]: I0130 11:59:12.927880 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"c868f7b8-6ab3-45a4-8057-042c3f583849","Type":"ContainerStarted","Data":"64becde19da3b0b2a0781a985aeea170582fea349f8c758b22ffdd6ecbd6dab8"} Jan 30 11:59:12 crc kubenswrapper[4703]: I0130 11:59:12.928002 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786" Jan 30 11:59:13 crc kubenswrapper[4703]: I0130 11:59:13.215721 4703 patch_prober.go:28] interesting pod/apiserver-76f77b778f-gkllf container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 30 11:59:13 crc kubenswrapper[4703]: [+]log ok Jan 30 11:59:13 crc kubenswrapper[4703]: [+]etcd ok Jan 30 11:59:13 crc kubenswrapper[4703]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 30 11:59:13 crc kubenswrapper[4703]: [+]poststarthook/generic-apiserver-start-informers ok Jan 30 11:59:13 crc kubenswrapper[4703]: [+]poststarthook/max-in-flight-filter ok Jan 30 11:59:13 crc kubenswrapper[4703]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 30 11:59:13 crc kubenswrapper[4703]: [+]poststarthook/image.openshift.io-apiserver-caches ok Jan 30 11:59:13 crc kubenswrapper[4703]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Jan 30 11:59:13 crc kubenswrapper[4703]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Jan 30 11:59:13 crc kubenswrapper[4703]: [+]poststarthook/project.openshift.io-projectcache ok Jan 30 11:59:13 crc kubenswrapper[4703]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Jan 30 11:59:13 crc kubenswrapper[4703]: [+]poststarthook/openshift.io-startinformers ok Jan 30 11:59:13 crc kubenswrapper[4703]: [+]poststarthook/openshift.io-restmapperupdater ok Jan 30 11:59:13 crc kubenswrapper[4703]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Jan 30 11:59:13 crc kubenswrapper[4703]: livez check failed Jan 30 11:59:13 crc kubenswrapper[4703]: I0130 11:59:13.216157 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-gkllf" podUID="28191393-c4b1-4d80-9994-ca31868c9fb4" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 11:59:13 crc kubenswrapper[4703]: I0130 11:59:13.544087 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 11:59:13 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld Jan 30 11:59:13 crc kubenswrapper[4703]: [+]process-running ok Jan 30 11:59:13 crc kubenswrapper[4703]: healthz check failed Jan 30 11:59:13 crc kubenswrapper[4703]: I0130 11:59:13.544199 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 11:59:14 crc kubenswrapper[4703]: I0130 11:59:14.095307 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"c868f7b8-6ab3-45a4-8057-042c3f583849","Type":"ContainerStarted","Data":"3de21695f9bfb38c64b0b7d35423569dbdfcc7b211678cef9548b7f0d476f388"} Jan 30 11:59:14 crc kubenswrapper[4703]: I0130 11:59:14.568760 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 30 
11:59:14 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld Jan 30 11:59:14 crc kubenswrapper[4703]: [+]process-running ok Jan 30 11:59:14 crc kubenswrapper[4703]: healthz check failed Jan 30 11:59:14 crc kubenswrapper[4703]: I0130 11:59:14.568836 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 30 11:59:14 crc kubenswrapper[4703]: I0130 11:59:14.569634 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=5.5696182400000005 podStartE2EDuration="5.56961824s" podCreationTimestamp="2026-01-30 11:59:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 11:59:14.569364072 +0000 UTC m=+190.347185726" watchObservedRunningTime="2026-01-30 11:59:14.56961824 +0000 UTC m=+190.347439894" Jan 30 11:59:14 crc kubenswrapper[4703]: I0130 11:59:14.875302 4703 patch_prober.go:28] interesting pod/image-registry-697d97f7c8-2r4b9 container/registry namespace/openshift-image-registry: Liveness probe status=failure output="Get \"https://10.217.0.15:5000/healthz\": dial tcp 10.217.0.15:5000: connect: connection refused" start-of-body= Jan 30 11:59:14 crc kubenswrapper[4703]: I0130 11:59:14.875862 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" podUID="2ab63a92-b99a-4d22-a8b0-a30409cd6ba5" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.15:5000/healthz\": dial tcp 10.217.0.15:5000: connect: connection refused" Jan 30 11:59:14 crc kubenswrapper[4703]: I0130 11:59:14.993729 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 30 11:59:15 crc kubenswrapper[4703]: I0130 11:59:15.094369 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/df97e9d9-5d6d-49e9-972d-84fbfb77cb2f-kubelet-dir\") pod \"df97e9d9-5d6d-49e9-972d-84fbfb77cb2f\" (UID: \"df97e9d9-5d6d-49e9-972d-84fbfb77cb2f\") " Jan 30 11:59:15 crc kubenswrapper[4703]: I0130 11:59:15.094475 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/df97e9d9-5d6d-49e9-972d-84fbfb77cb2f-kube-api-access\") pod \"df97e9d9-5d6d-49e9-972d-84fbfb77cb2f\" (UID: \"df97e9d9-5d6d-49e9-972d-84fbfb77cb2f\") " Jan 30 11:59:15 crc kubenswrapper[4703]: I0130 11:59:15.095106 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/df97e9d9-5d6d-49e9-972d-84fbfb77cb2f-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "df97e9d9-5d6d-49e9-972d-84fbfb77cb2f" (UID: "df97e9d9-5d6d-49e9-972d-84fbfb77cb2f"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 11:59:15 crc kubenswrapper[4703]: I0130 11:59:15.118996 4703 util.go:48] "No ready sandbox for pod can be found. 
Jan 30 11:59:15 crc kubenswrapper[4703]: I0130 11:59:15.186091 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df97e9d9-5d6d-49e9-972d-84fbfb77cb2f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "df97e9d9-5d6d-49e9-972d-84fbfb77cb2f" (UID: "df97e9d9-5d6d-49e9-972d-84fbfb77cb2f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 11:59:15 crc kubenswrapper[4703]: I0130 11:59:15.196102 4703 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/df97e9d9-5d6d-49e9-972d-84fbfb77cb2f-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 30 11:59:15 crc kubenswrapper[4703]: I0130 11:59:15.196148 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/df97e9d9-5d6d-49e9-972d-84fbfb77cb2f-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 30 11:59:15 crc kubenswrapper[4703]: I0130 11:59:15.363879 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"df97e9d9-5d6d-49e9-972d-84fbfb77cb2f","Type":"ContainerDied","Data":"34a38738556a737e1250017564e49562030f2c94fdfbfd737f3832faf0cd63b2"}
Jan 30 11:59:15 crc kubenswrapper[4703]: I0130 11:59:15.363933 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="34a38738556a737e1250017564e49562030f2c94fdfbfd737f3832faf0cd63b2"
Jan 30 11:59:15 crc kubenswrapper[4703]: I0130 11:59:15.547893 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:15 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:59:15 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:15 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:15 crc kubenswrapper[4703]: I0130 11:59:15.547960 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:59:16 crc kubenswrapper[4703]: I0130 11:59:16.542936 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:16 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:59:16 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:16 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:16 crc kubenswrapper[4703]: I0130 11:59:16.543002 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:59:17 crc kubenswrapper[4703]: I0130 11:59:17.361189 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 11:59:17 crc kubenswrapper[4703]: I0130 11:59:17.362481 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 11:59:17 crc kubenswrapper[4703]: I0130 11:59:17.361248 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 11:59:17 crc kubenswrapper[4703]: I0130 11:59:17.362725 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 11:59:17 crc kubenswrapper[4703]: I0130 11:59:17.362794 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-q7gv4"
Jan 30 11:59:17 crc kubenswrapper[4703]: I0130 11:59:17.363510 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"84c44c7e0ca622cf587ac57b54dde29fd5b63a01d41da8a617c675860133ca6e"} pod="openshift-console/downloads-7954f5f757-q7gv4" containerMessage="Container download-server failed liveness probe, will be restarted"
Jan 30 11:59:17 crc kubenswrapper[4703]: I0130 11:59:17.363604 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" containerID="cri-o://84c44c7e0ca622cf587ac57b54dde29fd5b63a01d41da8a617c675860133ca6e" gracePeriod=2
Jan 30 11:59:17 crc kubenswrapper[4703]: I0130 11:59:17.372214 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 11:59:17 crc kubenswrapper[4703]: I0130 11:59:17.372393 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 11:59:17 crc kubenswrapper[4703]: I0130 11:59:17.541622 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:17 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:59:17 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:17 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:17 crc kubenswrapper[4703]: I0130 11:59:17.541701 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
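
The entries above are the kubelet's liveness-failure path in miniature: the prober's GET against http://10.217.0.14:8080/ is refused, the SyncLoop marks the probe unhealthy, and the container is killed with the pod's termination grace period (gracePeriod=2) so it can be restarted. A rough Go sketch of the probe semantics, assuming a plain HTTP GET where any transport error or status >= 400 counts as a failure and consecutive failures up to the threshold trigger the restart; this is an illustration, not the kubelet's prober.go:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // probeOnce mirrors the HTTP probe semantics: a transport error (e.g.
    // "connect: connection refused", as above) or status >= 400 is a failure.
    func probeOnce(url string) error {
        client := &http.Client{Timeout: time.Second}
        resp, err := client.Get(url)
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        if resp.StatusCode >= 400 {
            return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
        }
        return nil
    }

    func main() {
        const failureThreshold = 3 // kubelet default
        failures := 0
        for {
            if err := probeOnce("http://10.217.0.14:8080/"); err != nil {
                failures++
                fmt.Println("Probe failed:", err)
                if failures >= failureThreshold {
                    fmt.Println("liveness threshold crossed: kill container, then restart it")
                    return
                }
            } else {
                failures = 0 // any success resets the count
            }
            time.Sleep(time.Second) // the real periodSeconds is typically longer
        }
    }
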
Jan 30 11:59:18 crc kubenswrapper[4703]: I0130 11:59:18.218472 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-gkllf"
Jan 30 11:59:18 crc kubenswrapper[4703]: I0130 11:59:18.224773 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-gkllf"
Jan 30 11:59:18 crc kubenswrapper[4703]: I0130 11:59:18.353679 4703 patch_prober.go:28] interesting pod/console-f9d7485db-mxjx6 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.19:8443/health\": dial tcp 10.217.0.19:8443: connect: connection refused" start-of-body=
Jan 30 11:59:18 crc kubenswrapper[4703]: I0130 11:59:18.353735 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-mxjx6" podUID="aa4aaaa5-396e-4e62-92a3-74b835af58a7" containerName="console" probeResult="failure" output="Get \"https://10.217.0.19:8443/health\": dial tcp 10.217.0.19:8443: connect: connection refused"
Jan 30 11:59:18 crc kubenswrapper[4703]: I0130 11:59:18.484272 4703 generic.go:334] "Generic (PLEG): container finished" podID="c868f7b8-6ab3-45a4-8057-042c3f583849" containerID="3de21695f9bfb38c64b0b7d35423569dbdfcc7b211678cef9548b7f0d476f388" exitCode=0
Jan 30 11:59:18 crc kubenswrapper[4703]: I0130 11:59:18.484361 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"c868f7b8-6ab3-45a4-8057-042c3f583849","Type":"ContainerDied","Data":"3de21695f9bfb38c64b0b7d35423569dbdfcc7b211678cef9548b7f0d476f388"}
Jan 30 11:59:18 crc kubenswrapper[4703]: I0130 11:59:18.499686 4703 generic.go:334] "Generic (PLEG): container finished" podID="1441806f-c27a-4a93-82e3-123caba174c5" containerID="84c44c7e0ca622cf587ac57b54dde29fd5b63a01d41da8a617c675860133ca6e" exitCode=0
Jan 30 11:59:18 crc kubenswrapper[4703]: I0130 11:59:18.500310 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-q7gv4" event={"ID":"1441806f-c27a-4a93-82e3-123caba174c5","Type":"ContainerDied","Data":"84c44c7e0ca622cf587ac57b54dde29fd5b63a01d41da8a617c675860133ca6e"}
Jan 30 11:59:18 crc kubenswrapper[4703]: I0130 11:59:18.547047 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:18 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:59:18 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:18 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:18 crc kubenswrapper[4703]: I0130 11:59:18.547303 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:59:19 crc kubenswrapper[4703]: I0130 11:59:19.532234 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-q7gv4" event={"ID":"1441806f-c27a-4a93-82e3-123caba174c5","Type":"ContainerStarted","Data":"7bf15f880cc782893ac898aa58150394186eb0ba2d8ccb1292bea472dbc54b9c"}
Jan 30 11:59:19 crc kubenswrapper[4703]: I0130 11:59:19.532778 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-q7gv4"
Jan 30 11:59:19 crc kubenswrapper[4703]: I0130 11:59:19.532896 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 11:59:19 crc kubenswrapper[4703]: I0130 11:59:19.532987 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 11:59:19 crc kubenswrapper[4703]: I0130 11:59:19.812302 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:19 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:59:19 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:19 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:19 crc kubenswrapper[4703]: I0130 11:59:19.812573 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:59:20 crc kubenswrapper[4703]: I0130 11:59:20.592828 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 11:59:20 crc kubenswrapper[4703]: I0130 11:59:20.592899 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 11:59:20 crc kubenswrapper[4703]: I0130 11:59:20.595222 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:20 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:59:20 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:20 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:20 crc kubenswrapper[4703]: I0130 11:59:20.595248 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:59:21 crc kubenswrapper[4703]: I0130 11:59:21.540658 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:21 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:59:21 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:21 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:21 crc kubenswrapper[4703]: I0130 11:59:21.540701 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:59:21 crc kubenswrapper[4703]: I0130 11:59:21.672957 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 11:59:21 crc kubenswrapper[4703]: I0130 11:59:21.673061 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 11:59:22 crc kubenswrapper[4703]: I0130 11:59:22.044429 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 30 11:59:22 crc kubenswrapper[4703]: I0130 11:59:22.245553 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c868f7b8-6ab3-45a4-8057-042c3f583849-kube-api-access\") pod \"c868f7b8-6ab3-45a4-8057-042c3f583849\" (UID: \"c868f7b8-6ab3-45a4-8057-042c3f583849\") "
Jan 30 11:59:22 crc kubenswrapper[4703]: I0130 11:59:22.245629 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c868f7b8-6ab3-45a4-8057-042c3f583849-kubelet-dir\") pod \"c868f7b8-6ab3-45a4-8057-042c3f583849\" (UID: \"c868f7b8-6ab3-45a4-8057-042c3f583849\") "
Jan 30 11:59:22 crc kubenswrapper[4703]: I0130 11:59:22.246208 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c868f7b8-6ab3-45a4-8057-042c3f583849-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "c868f7b8-6ab3-45a4-8057-042c3f583849" (UID: "c868f7b8-6ab3-45a4-8057-042c3f583849"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 30 11:59:22 crc kubenswrapper[4703]: I0130 11:59:22.272286 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c868f7b8-6ab3-45a4-8057-042c3f583849-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "c868f7b8-6ab3-45a4-8057-042c3f583849" (UID: "c868f7b8-6ab3-45a4-8057-042c3f583849"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 11:59:22 crc kubenswrapper[4703]: I0130 11:59:22.369803 4703 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c868f7b8-6ab3-45a4-8057-042c3f583849-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 30 11:59:22 crc kubenswrapper[4703]: I0130 11:59:22.369882 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c868f7b8-6ab3-45a4-8057-042c3f583849-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 30 11:59:22 crc kubenswrapper[4703]: I0130 11:59:22.744786 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:22 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:59:22 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:22 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:22 crc kubenswrapper[4703]: I0130 11:59:22.744859 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:59:22 crc kubenswrapper[4703]: I0130 11:59:22.759805 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"c868f7b8-6ab3-45a4-8057-042c3f583849","Type":"ContainerDied","Data":"64becde19da3b0b2a0781a985aeea170582fea349f8c758b22ffdd6ecbd6dab8"}
Jan 30 11:59:22 crc kubenswrapper[4703]: I0130 11:59:22.759911 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64becde19da3b0b2a0781a985aeea170582fea349f8c758b22ffdd6ecbd6dab8"
Jan 30 11:59:22 crc kubenswrapper[4703]: I0130 11:59:22.760012 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
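
The teardown entries for revision-pruner-8-crc repeat the three-step unmount sequence seen earlier for the controller-manager pruner: the reconciler starts UnmountVolume for each volume, the operation generator's TearDown succeeds per plugin (host-path, projected), and only then are the volumes reported detached from node "crc". A compressed Go model of that ordering, with a hypothetical vol type standing in for the kubelet's internal records (a sketch, not the reconciler's API):

    package main

    import "fmt"

    // vol is a hypothetical stand-in for the kubelet's record of a mounted volume.
    type vol struct{ name, plugin string }

    func main() {
        podUID := "c868f7b8-6ab3-45a4-8057-042c3f583849"
        vols := []vol{
            {"kube-api-access", "kubernetes.io/projected"},
            {"kubelet-dir", "kubernetes.io/host-path"},
        }
        for _, v := range vols {
            // 1. reconciler_common: the unmount operation is started.
            fmt.Printf("operationExecutor.UnmountVolume started for volume %q pod %q\n", v.name, podUID)
            // 2. operation_generator: the plugin removes the mount point.
            fmt.Printf("UnmountVolume.TearDown succeeded for volume %q (plugin %s)\n", v.name, v.plugin)
        }
        // 3. Only after TearDown does the reconciler report the volumes detached.
        for _, v := range vols {
            fmt.Printf("Volume detached for volume %q on node \"crc\"\n", v.name)
        }
    }
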
Jan 30 11:59:23 crc kubenswrapper[4703]: I0130 11:59:23.584438 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:23 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:59:23 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:23 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:23 crc kubenswrapper[4703]: I0130 11:59:23.584594 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:59:24 crc kubenswrapper[4703]: I0130 11:59:24.561983 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:24 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:59:24 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:24 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:24 crc kubenswrapper[4703]: I0130 11:59:24.562443 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:59:25 crc kubenswrapper[4703]: I0130 11:59:25.163311 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9"
Jan 30 11:59:25 crc kubenswrapper[4703]: I0130 11:59:25.546171 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:25 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:59:25 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:25 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:25 crc kubenswrapper[4703]: I0130 11:59:25.546237 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:59:26 crc kubenswrapper[4703]: I0130 11:59:26.541532 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:26 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:59:26 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:26 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:26 crc kubenswrapper[4703]: I0130 11:59:26.541648 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
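
The router's startup-probe output follows the Kubernetes healthz convention: each named check is reported as [+]name ok or [-]name failed, and the endpoint returns HTTP 500 plus a trailing "healthz check failed" line until every check passes. In the entries above backend-http and has-synced fail repeatedly; at 11:59:31 (below) has-synced flips to [+] shortly before the probe finally succeeds. A minimal handler in that style (an illustrative Go sketch, not the OpenShift router's code):

    package main

    import (
        "fmt"
        "net/http"
    )

    // check is a named health check in the k8s healthz style.
    type check struct {
        name string
        ok   func() bool
    }

    func healthz(checks []check) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            failed := false
            body := ""
            for _, c := range checks {
                if c.ok() {
                    body += fmt.Sprintf("[+]%s ok\n", c.name)
                } else {
                    body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
                    failed = true
                }
            }
            if failed {
                body += "healthz check failed\n"
                w.WriteHeader(http.StatusInternalServerError) // probe sees statuscode: 500
            }
            fmt.Fprint(w, body)
        }
    }

    func main() {
        checks := []check{
            {"backend-http", func() bool { return false }}, // failing, as in the log
            {"has-synced", func() bool { return true }},
            {"process-running", func() bool { return true }},
        }
        http.HandleFunc("/healthz", healthz(checks))
        fmt.Println(http.ListenAndServe(":8080", nil))
    }
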
Jan 30 11:59:27 crc kubenswrapper[4703]: I0130 11:59:27.450031 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 11:59:27 crc kubenswrapper[4703]: I0130 11:59:27.450136 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 11:59:27 crc kubenswrapper[4703]: I0130 11:59:27.450537 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 11:59:27 crc kubenswrapper[4703]: I0130 11:59:27.450577 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 11:59:27 crc kubenswrapper[4703]: I0130 11:59:27.663682 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:27 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:59:27 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:27 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:27 crc kubenswrapper[4703]: I0130 11:59:27.663799 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:59:28 crc kubenswrapper[4703]: I0130 11:59:28.583664 4703 patch_prober.go:28] interesting pod/console-f9d7485db-mxjx6 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.19:8443/health\": dial tcp 10.217.0.19:8443: connect: connection refused" start-of-body=
Jan 30 11:59:28 crc kubenswrapper[4703]: I0130 11:59:28.584048 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-mxjx6" podUID="aa4aaaa5-396e-4e62-92a3-74b835af58a7" containerName="console" probeResult="failure" output="Get \"https://10.217.0.19:8443/health\": dial tcp 10.217.0.19:8443: connect: connection refused"
Jan 30 11:59:28 crc kubenswrapper[4703]: I0130 11:59:28.590979 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:28 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:59:28 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:28 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:28 crc kubenswrapper[4703]: I0130 11:59:28.591054 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:59:28 crc kubenswrapper[4703]: I0130 11:59:28.603321 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4xxv7"
Jan 30 11:59:29 crc kubenswrapper[4703]: I0130 11:59:29.748381 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:29 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:59:29 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:29 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:29 crc kubenswrapper[4703]: I0130 11:59:29.748437 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:59:30 crc kubenswrapper[4703]: I0130 11:59:30.571917 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:30 crc kubenswrapper[4703]: [-]has-synced failed: reason withheld
Jan 30 11:59:30 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:30 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:30 crc kubenswrapper[4703]: I0130 11:59:30.572045 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:59:31 crc kubenswrapper[4703]: I0130 11:59:31.567622 4703 patch_prober.go:28] interesting pod/router-default-5444994796-qzs78 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 30 11:59:31 crc kubenswrapper[4703]: [+]has-synced ok
Jan 30 11:59:31 crc kubenswrapper[4703]: [+]process-running ok
Jan 30 11:59:31 crc kubenswrapper[4703]: healthz check failed
Jan 30 11:59:31 crc kubenswrapper[4703]: I0130 11:59:31.567717 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-qzs78" podUID="db60e754-3b34-4d6d-8d4c-46384bc04d60" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 11:59:32 crc kubenswrapper[4703]: I0130 11:59:32.557185 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-qzs78"
Jan 30 11:59:32 crc kubenswrapper[4703]: I0130 11:59:32.987625 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-qzs78"
Jan 30 11:59:37 crc kubenswrapper[4703]: I0130 11:59:37.386702 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 11:59:37 crc kubenswrapper[4703]: I0130 11:59:37.387007 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 11:59:37 crc kubenswrapper[4703]: I0130 11:59:37.387773 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 11:59:37 crc kubenswrapper[4703]: I0130 11:59:37.388491 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 11:59:38 crc kubenswrapper[4703]: I0130 11:59:38.437951 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-mxjx6"
Jan 30 11:59:38 crc kubenswrapper[4703]: I0130 11:59:38.462751 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-mxjx6"
Jan 30 11:59:42 crc kubenswrapper[4703]: I0130 11:59:42.823375 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 30 11:59:42 crc kubenswrapper[4703]: I0130 11:59:42.824159 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 30 11:59:42 crc kubenswrapper[4703]: I0130 11:59:42.824236 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm"
Jan 30 11:59:42 crc kubenswrapper[4703]: I0130 11:59:42.824872 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed"} pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 30 11:59:42 crc kubenswrapper[4703]: I0130 11:59:42.824963 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" containerID="cri-o://58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed" gracePeriod=600
Jan 30 11:59:45 crc kubenswrapper[4703]: I0130 11:59:45.652734 4703 generic.go:334] "Generic (PLEG): container finished" podID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerID="58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed" exitCode=0
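
Note the different grace periods in the two kill paths: download-server was killed with gracePeriod=2, while machine-config-daemon gets gracePeriod=600, both taken from the pod spec's terminationGracePeriodSeconds. The runtime sends SIGTERM, waits out the grace period, and only then force-kills; here the daemon exits cleanly (exitCode=0) well within its window. A small Unix-only Go sketch of that kill-with-grace pattern (illustrative; the actual signalling is done by CRI-O, not the kubelet):

    package main

    import (
        "fmt"
        "os/exec"
        "syscall"
        "time"
    )

    // killWithGrace sends SIGTERM, waits up to gracePeriod for the process to
    // exit, then SIGKILLs it -- the shape behind "Killing container with a
    // grace period" (gracePeriod=2 vs. gracePeriod=600 above).
    func killWithGrace(cmd *exec.Cmd, gracePeriod time.Duration) {
        cmd.Process.Signal(syscall.SIGTERM)
        done := make(chan error, 1)
        go func() { done <- cmd.Wait() }()
        select {
        case <-done:
            fmt.Println("exited within grace period; PLEG would report ContainerDied")
        case <-time.After(gracePeriod):
            cmd.Process.Kill()
            fmt.Println("grace period elapsed; sent SIGKILL")
        }
    }

    func main() {
        cmd := exec.Command("sleep", "60")
        if err := cmd.Start(); err != nil {
            panic(err)
        }
        killWithGrace(cmd, 2*time.Second)
    }
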
Jan 30 11:59:45 crc kubenswrapper[4703]: I0130 11:59:45.653087 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerDied","Data":"58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed"}
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.377218 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.377292 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.377819 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.377849 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.377878 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-q7gv4"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.378817 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"7bf15f880cc782893ac898aa58150394186eb0ba2d8ccb1292bea472dbc54b9c"} pod="openshift-console/downloads-7954f5f757-q7gv4" containerMessage="Container download-server failed liveness probe, will be restarted"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.378897 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" containerID="cri-o://7bf15f880cc782893ac898aa58150394186eb0ba2d8ccb1292bea472dbc54b9c" gracePeriod=2
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.379576 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.379600 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.621305 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Jan 30 11:59:47 crc kubenswrapper[4703]: E0130 11:59:47.621792 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df97e9d9-5d6d-49e9-972d-84fbfb77cb2f" containerName="pruner"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.621817 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="df97e9d9-5d6d-49e9-972d-84fbfb77cb2f" containerName="pruner"
Jan 30 11:59:47 crc kubenswrapper[4703]: E0130 11:59:47.621843 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2eaf3e49-2394-495c-926e-5504ff81ccc5" containerName="collect-profiles"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.621858 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2eaf3e49-2394-495c-926e-5504ff81ccc5" containerName="collect-profiles"
Jan 30 11:59:47 crc kubenswrapper[4703]: E0130 11:59:47.621882 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c868f7b8-6ab3-45a4-8057-042c3f583849" containerName="pruner"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.621891 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="c868f7b8-6ab3-45a4-8057-042c3f583849" containerName="pruner"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.622064 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="df97e9d9-5d6d-49e9-972d-84fbfb77cb2f" containerName="pruner"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.622079 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2eaf3e49-2394-495c-926e-5504ff81ccc5" containerName="collect-profiles"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.622112 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="c868f7b8-6ab3-45a4-8057-042c3f583849" containerName="pruner"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.622690 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.622810 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.627669 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.628102 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.786569 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cf6547d4-21f5-4068-b3b2-e2e070b07310-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"cf6547d4-21f5-4068-b3b2-e2e070b07310\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.786636 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cf6547d4-21f5-4068-b3b2-e2e070b07310-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"cf6547d4-21f5-4068-b3b2-e2e070b07310\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.891590 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cf6547d4-21f5-4068-b3b2-e2e070b07310-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"cf6547d4-21f5-4068-b3b2-e2e070b07310\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.891814 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cf6547d4-21f5-4068-b3b2-e2e070b07310-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"cf6547d4-21f5-4068-b3b2-e2e070b07310\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.892370 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cf6547d4-21f5-4068-b3b2-e2e070b07310-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"cf6547d4-21f5-4068-b3b2-e2e070b07310\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.970188 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cf6547d4-21f5-4068-b3b2-e2e070b07310-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"cf6547d4-21f5-4068-b3b2-e2e070b07310\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 30 11:59:47 crc kubenswrapper[4703]: I0130 11:59:47.977706 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 30 11:59:51 crc kubenswrapper[4703]: I0130 11:59:51.616611 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Jan 30 11:59:51 crc kubenswrapper[4703]: I0130 11:59:51.619770 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Jan 30 11:59:51 crc kubenswrapper[4703]: I0130 11:59:51.624139 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Jan 30 11:59:51 crc kubenswrapper[4703]: I0130 11:59:51.807042 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/69ea44cc-6b13-4b1e-9621-448115bc3090-kube-api-access\") pod \"installer-9-crc\" (UID: \"69ea44cc-6b13-4b1e-9621-448115bc3090\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 30 11:59:51 crc kubenswrapper[4703]: I0130 11:59:51.807203 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/69ea44cc-6b13-4b1e-9621-448115bc3090-kubelet-dir\") pod \"installer-9-crc\" (UID: \"69ea44cc-6b13-4b1e-9621-448115bc3090\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 30 11:59:51 crc kubenswrapper[4703]: I0130 11:59:51.807323 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/69ea44cc-6b13-4b1e-9621-448115bc3090-var-lock\") pod \"installer-9-crc\" (UID: \"69ea44cc-6b13-4b1e-9621-448115bc3090\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 30 11:59:51 crc kubenswrapper[4703]: I0130 11:59:51.909265 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/69ea44cc-6b13-4b1e-9621-448115bc3090-var-lock\") pod \"installer-9-crc\" (UID: \"69ea44cc-6b13-4b1e-9621-448115bc3090\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 30 11:59:51 crc kubenswrapper[4703]: I0130 11:59:51.909328 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/69ea44cc-6b13-4b1e-9621-448115bc3090-kube-api-access\") pod \"installer-9-crc\" (UID: \"69ea44cc-6b13-4b1e-9621-448115bc3090\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 30 11:59:51 crc kubenswrapper[4703]: I0130 11:59:51.909381 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/69ea44cc-6b13-4b1e-9621-448115bc3090-kubelet-dir\") pod \"installer-9-crc\" (UID: \"69ea44cc-6b13-4b1e-9621-448115bc3090\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 30 11:59:51 crc kubenswrapper[4703]: I0130 11:59:51.909424 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/69ea44cc-6b13-4b1e-9621-448115bc3090-var-lock\") pod \"installer-9-crc\" (UID: \"69ea44cc-6b13-4b1e-9621-448115bc3090\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 30 11:59:51 crc kubenswrapper[4703]: I0130 11:59:51.909457 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/69ea44cc-6b13-4b1e-9621-448115bc3090-kubelet-dir\") pod \"installer-9-crc\" (UID: \"69ea44cc-6b13-4b1e-9621-448115bc3090\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 30 11:59:51 crc kubenswrapper[4703]: I0130 11:59:51.986391 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/69ea44cc-6b13-4b1e-9621-448115bc3090-kube-api-access\") pod \"installer-9-crc\" (UID: \"69ea44cc-6b13-4b1e-9621-448115bc3090\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 30 11:59:52 crc kubenswrapper[4703]: I0130 11:59:52.334007 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Jan 30 11:59:57 crc kubenswrapper[4703]: I0130 11:59:57.358698 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 11:59:57 crc kubenswrapper[4703]: I0130 11:59:57.359016 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 12:00:00 crc kubenswrapper[4703]: I0130 12:00:00.158481 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld"]
Jan 30 12:00:00 crc kubenswrapper[4703]: I0130 12:00:00.159513 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld"
Jan 30 12:00:00 crc kubenswrapper[4703]: I0130 12:00:00.162262 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 30 12:00:00 crc kubenswrapper[4703]: I0130 12:00:00.162353 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 30 12:00:00 crc kubenswrapper[4703]: I0130 12:00:00.169977 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld"]
Jan 30 12:00:00 crc kubenswrapper[4703]: I0130 12:00:00.278014 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/178b5fda-ccd1-492b-9d7f-5a44efecaaac-config-volume\") pod \"collect-profiles-29496240-g78ld\" (UID: \"178b5fda-ccd1-492b-9d7f-5a44efecaaac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld"
Jan 30 12:00:00 crc kubenswrapper[4703]: I0130 12:00:00.278778 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gz7f2\" (UniqueName: \"kubernetes.io/projected/178b5fda-ccd1-492b-9d7f-5a44efecaaac-kube-api-access-gz7f2\") pod \"collect-profiles-29496240-g78ld\" (UID: \"178b5fda-ccd1-492b-9d7f-5a44efecaaac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld"
Jan 30 12:00:00 crc kubenswrapper[4703]: I0130 12:00:00.278843 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/178b5fda-ccd1-492b-9d7f-5a44efecaaac-secret-volume\") pod \"collect-profiles-29496240-g78ld\" (UID: \"178b5fda-ccd1-492b-9d7f-5a44efecaaac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld"
Jan 30 12:00:00 crc kubenswrapper[4703]: I0130 12:00:00.379618 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gz7f2\" (UniqueName: \"kubernetes.io/projected/178b5fda-ccd1-492b-9d7f-5a44efecaaac-kube-api-access-gz7f2\") pod \"collect-profiles-29496240-g78ld\" (UID: \"178b5fda-ccd1-492b-9d7f-5a44efecaaac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld"
Jan 30 12:00:00 crc kubenswrapper[4703]: I0130 12:00:00.379669 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/178b5fda-ccd1-492b-9d7f-5a44efecaaac-secret-volume\") pod \"collect-profiles-29496240-g78ld\" (UID: \"178b5fda-ccd1-492b-9d7f-5a44efecaaac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld"
Jan 30 12:00:00 crc kubenswrapper[4703]: I0130 12:00:00.379723 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/178b5fda-ccd1-492b-9d7f-5a44efecaaac-config-volume\") pod \"collect-profiles-29496240-g78ld\" (UID: \"178b5fda-ccd1-492b-9d7f-5a44efecaaac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld"
Jan 30 12:00:00 crc kubenswrapper[4703]: I0130 12:00:00.381536 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/178b5fda-ccd1-492b-9d7f-5a44efecaaac-config-volume\") pod \"collect-profiles-29496240-g78ld\" (UID: \"178b5fda-ccd1-492b-9d7f-5a44efecaaac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld"
Jan 30 12:00:00 crc kubenswrapper[4703]: I0130 12:00:00.389371 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/178b5fda-ccd1-492b-9d7f-5a44efecaaac-secret-volume\") pod \"collect-profiles-29496240-g78ld\" (UID: \"178b5fda-ccd1-492b-9d7f-5a44efecaaac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld"
Jan 30 12:00:00 crc kubenswrapper[4703]: I0130 12:00:00.413612 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gz7f2\" (UniqueName: \"kubernetes.io/projected/178b5fda-ccd1-492b-9d7f-5a44efecaaac-kube-api-access-gz7f2\") pod \"collect-profiles-29496240-g78ld\" (UID: \"178b5fda-ccd1-492b-9d7f-5a44efecaaac\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld"
Jan 30 12:00:00 crc kubenswrapper[4703]: I0130 12:00:00.493521 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld"
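
Pod setup mirrors the unmount path in reverse: VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded for each volume, before a sandbox is created. The kube-api-access-gz7f2 volume mounted here is the standard projected service-account volume, which the container sees at /var/run/secrets/kubernetes.io/serviceaccount (the same MountPath visible in the &Container dumps further down). A tiny in-pod Go sketch that reads it (it assumes it is actually running inside a pod):

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // saDir is where the projected kube-api-access volume lands in every container.
    const saDir = "/var/run/secrets/kubernetes.io/serviceaccount"

    func main() {
        for _, name := range []string{"token", "ca.crt", "namespace"} {
            b, err := os.ReadFile(filepath.Join(saDir, name))
            if err != nil {
                fmt.Printf("%s: not available (not running in a pod?): %v\n", name, err)
                continue
            }
            fmt.Printf("%s: %d bytes\n", name, len(b))
        }
    }
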
Jan 30 12:00:01 crc kubenswrapper[4703]: I0130 12:00:01.709299 4703 generic.go:334] "Generic (PLEG): container finished" podID="1441806f-c27a-4a93-82e3-123caba174c5" containerID="7bf15f880cc782893ac898aa58150394186eb0ba2d8ccb1292bea472dbc54b9c" exitCode=0
Jan 30 12:00:01 crc kubenswrapper[4703]: I0130 12:00:01.709381 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-q7gv4" event={"ID":"1441806f-c27a-4a93-82e3-123caba174c5","Type":"ContainerDied","Data":"7bf15f880cc782893ac898aa58150394186eb0ba2d8ccb1292bea472dbc54b9c"}
Jan 30 12:00:01 crc kubenswrapper[4703]: I0130 12:00:01.710015 4703 scope.go:117] "RemoveContainer" containerID="84c44c7e0ca622cf587ac57b54dde29fd5b63a01d41da8a617c675860133ca6e"
Jan 30 12:00:07 crc kubenswrapper[4703]: I0130 12:00:07.357985 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 12:00:07 crc kubenswrapper[4703]: I0130 12:00:07.358736 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 12:00:17 crc kubenswrapper[4703]: I0130 12:00:17.358964 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 12:00:17 crc kubenswrapper[4703]: I0130 12:00:17.359585 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 12:00:18 crc kubenswrapper[4703]: E0130 12:00:18.110998 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Jan 30 12:00:18 crc kubenswrapper[4703]: E0130 12:00:18.111258 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6tq7t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-vztsq_openshift-marketplace(bdfc1d61-7a35-49e6-a091-83226033a80f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 30 12:00:18 crc kubenswrapper[4703]: E0130 12:00:18.112452 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-vztsq" podUID="bdfc1d61-7a35-49e6-a091-83226033a80f"
Jan 30 12:00:20 crc kubenswrapper[4703]: E0130 12:00:20.027617 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-vztsq" podUID="bdfc1d61-7a35-49e6-a091-83226033a80f"
Jan 30 12:00:20 crc kubenswrapper[4703]: E0130 12:00:20.115780 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Jan 30 12:00:20 crc kubenswrapper[4703]: E0130 12:00:20.116206 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4tnmw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-rsvj7_openshift-marketplace(11176772-9170-499f-8fec-e460709fd300): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 30 12:00:20 crc kubenswrapper[4703]: E0130 12:00:20.117438 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-rsvj7" podUID="11176772-9170-499f-8fec-e460709fd300"
Jan 30 12:00:26 crc kubenswrapper[4703]: I0130 12:00:26.485550 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-d4tv6"]
Jan 30 12:00:26 crc kubenswrapper[4703]: E0130 12:00:26.621278 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-rsvj7" podUID="11176772-9170-499f-8fec-e460709fd300"
Jan 30 12:00:26 crc kubenswrapper[4703]: E0130 12:00:26.970263 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Jan 30 12:00:26 crc kubenswrapper[4703]: E0130 12:00:26.970447 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8gvnv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-ngspg_openshift-marketplace(b8c48ae5-5f36-4ab5-b6a8-30e51be6d436): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
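
The marketplace catalog pods above cycle between ErrImagePull and ImagePullBackOff: each failed pull ("context canceled" from the CRI) re-enters the kubelet's image backoff, which roughly doubles the delay between attempts from about 10s up to a 5m cap. A toy Go sketch of that retry shape, with pullImage as a hypothetical stand-in for the failing CRI call and millisecond delays so the demo finishes quickly:

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // pullImage is a hypothetical stand-in for the CRI ImagePull call failing above.
    func pullImage(image string) error {
        return errors.New("rpc error: code = Canceled desc = copying system image from manifest list")
    }

    func main() {
        const image = "registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
        // The kubelet starts around 10s and doubles up to a 5m cap; scaled to
        // milliseconds here so the sketch terminates quickly.
        backoff, maxBackoff := 10*time.Millisecond, 300*time.Millisecond
        for attempt := 1; attempt <= 6; attempt++ {
            err := pullImage(image)
            if err == nil {
                fmt.Println("image pulled")
                return
            }
            fmt.Printf("attempt %d: ErrImagePull: %v; ImagePullBackOff for %s\n", attempt, err, backoff)
            time.Sleep(backoff)
            if backoff *= 2; backoff > maxBackoff {
                backoff = maxBackoff
            }
        }
        fmt.Println("still failing; pod stays in ImagePullBackOff")
    }
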
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8gvnv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-ngspg_openshift-marketplace(b8c48ae5-5f36-4ab5-b6a8-30e51be6d436): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 30 12:00:26 crc kubenswrapper[4703]: E0130 12:00:26.971707 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-ngspg" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" Jan 30 12:00:27 crc kubenswrapper[4703]: I0130 12:00:27.358778 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 12:00:27 crc kubenswrapper[4703]: I0130 12:00:27.358851 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.123661 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-ngspg" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.195485 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.195701 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xnjdm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-m9qpx_openshift-marketplace(f49515e4-f9bb-4741-a979-5d59fbc7198d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.196961 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-m9qpx" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.206766 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.206952 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-546ch,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-8gf7w_openshift-marketplace(2bfef4b4-9e99-4b51-bec9-2e6619cdbc63): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.208211 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-8gf7w" podUID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.257435 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.257948 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cdwdv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-988gc_openshift-marketplace(4e4915d0-912f-426c-9d74-3d42e36678ed): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.259084 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-988gc" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.270675 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.270767 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-p75v5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-xzz7n_openshift-marketplace(61693ed0-e352-4c89-9076-be1acb1a0bfe): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.293761 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.293914 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wh47z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
certified-operators-64ptg_openshift-marketplace(7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.295468 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-xzz7n" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.295532 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-64ptg" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" Jan 30 12:00:29 crc kubenswrapper[4703]: I0130 12:00:29.754871 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerStarted","Data":"24f8d677e145509afc38b556c3f5389f822557c4fa4acc9a9cf095e193cc4c81"} Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.777735 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-988gc" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" Jan 30 12:00:29 crc kubenswrapper[4703]: I0130 12:00:29.777665 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-q7gv4" event={"ID":"1441806f-c27a-4a93-82e3-123caba174c5","Type":"ContainerStarted","Data":"4158b38180736b651a831dbb8b4516357f58ece630e8a346991757fe510f7ad8"} Jan 30 12:00:29 crc kubenswrapper[4703]: I0130 12:00:29.779513 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-q7gv4" Jan 30 12:00:29 crc kubenswrapper[4703]: I0130 12:00:29.779965 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 12:00:29 crc kubenswrapper[4703]: I0130 12:00:29.779997 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.780083 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-m9qpx" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.780165 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" 
with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-xzz7n" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.780895 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-64ptg" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" Jan 30 12:00:29 crc kubenswrapper[4703]: E0130 12:00:29.780224 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-8gf7w" podUID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" Jan 30 12:00:29 crc kubenswrapper[4703]: I0130 12:00:29.819978 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 30 12:00:30 crc kubenswrapper[4703]: I0130 12:00:30.037989 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 30 12:00:30 crc kubenswrapper[4703]: I0130 12:00:30.048416 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld"] Jan 30 12:00:30 crc kubenswrapper[4703]: W0130 12:00:30.055342 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podcf6547d4_21f5_4068_b3b2_e2e070b07310.slice/crio-b30c0c32ca961f05d3ec2b498ce908660bfd3c45b0640083f63efc7654b2e2c8 WatchSource:0}: Error finding container b30c0c32ca961f05d3ec2b498ce908660bfd3c45b0640083f63efc7654b2e2c8: Status 404 returned error can't find the container with id b30c0c32ca961f05d3ec2b498ce908660bfd3c45b0640083f63efc7654b2e2c8 Jan 30 12:00:30 crc kubenswrapper[4703]: I0130 12:00:30.780569 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"69ea44cc-6b13-4b1e-9621-448115bc3090","Type":"ContainerStarted","Data":"d095acdf6ead9d9cf62a498001d5667aaa9e862a52fad9a88d291040ad7ebd64"} Jan 30 12:00:30 crc kubenswrapper[4703]: I0130 12:00:30.781146 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"69ea44cc-6b13-4b1e-9621-448115bc3090","Type":"ContainerStarted","Data":"043ef4ef6b71811cdfcfefca34acd3b65c6870f58f724ee1455bc546a0b56899"} Jan 30 12:00:30 crc kubenswrapper[4703]: I0130 12:00:30.782139 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"cf6547d4-21f5-4068-b3b2-e2e070b07310","Type":"ContainerStarted","Data":"b5163f102805e7f1fea1338c4591180ff9534baf38944d62bb4e6b20ee743cf6"} Jan 30 12:00:30 crc kubenswrapper[4703]: I0130 12:00:30.782166 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"cf6547d4-21f5-4068-b3b2-e2e070b07310","Type":"ContainerStarted","Data":"b30c0c32ca961f05d3ec2b498ce908660bfd3c45b0640083f63efc7654b2e2c8"} Jan 30 12:00:30 crc kubenswrapper[4703]: I0130 12:00:30.784518 4703 generic.go:334] "Generic (PLEG): container finished" podID="178b5fda-ccd1-492b-9d7f-5a44efecaaac" 
containerID="afd2ce35861b11d0750c6803d3c3b6fff9e34ea4c8c679c9bee26432c00fe539" exitCode=0 Jan 30 12:00:30 crc kubenswrapper[4703]: I0130 12:00:30.784970 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld" event={"ID":"178b5fda-ccd1-492b-9d7f-5a44efecaaac","Type":"ContainerDied","Data":"afd2ce35861b11d0750c6803d3c3b6fff9e34ea4c8c679c9bee26432c00fe539"} Jan 30 12:00:30 crc kubenswrapper[4703]: I0130 12:00:30.784996 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld" event={"ID":"178b5fda-ccd1-492b-9d7f-5a44efecaaac","Type":"ContainerStarted","Data":"9eed66f75e4a2a9da1a358d9c26b44e1ce788e6aab532f87c5a630dfa3b6873d"} Jan 30 12:00:30 crc kubenswrapper[4703]: I0130 12:00:30.785697 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 12:00:30 crc kubenswrapper[4703]: I0130 12:00:30.785734 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 12:00:30 crc kubenswrapper[4703]: I0130 12:00:30.815460 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=39.815434183 podStartE2EDuration="39.815434183s" podCreationTimestamp="2026-01-30 11:59:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:00:30.799067667 +0000 UTC m=+266.576889321" watchObservedRunningTime="2026-01-30 12:00:30.815434183 +0000 UTC m=+266.593255837" Jan 30 12:00:30 crc kubenswrapper[4703]: I0130 12:00:30.830768 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=43.830748189 podStartE2EDuration="43.830748189s" podCreationTimestamp="2026-01-30 11:59:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:00:30.82563645 +0000 UTC m=+266.603458114" watchObservedRunningTime="2026-01-30 12:00:30.830748189 +0000 UTC m=+266.608569843" Jan 30 12:00:31 crc kubenswrapper[4703]: I0130 12:00:31.791689 4703 generic.go:334] "Generic (PLEG): container finished" podID="cf6547d4-21f5-4068-b3b2-e2e070b07310" containerID="b5163f102805e7f1fea1338c4591180ff9534baf38944d62bb4e6b20ee743cf6" exitCode=0 Jan 30 12:00:31 crc kubenswrapper[4703]: I0130 12:00:31.791788 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"cf6547d4-21f5-4068-b3b2-e2e070b07310","Type":"ContainerDied","Data":"b5163f102805e7f1fea1338c4591180ff9534baf38944d62bb4e6b20ee743cf6"} Jan 30 12:00:31 crc kubenswrapper[4703]: I0130 12:00:31.795732 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 12:00:31 crc 
kubenswrapper[4703]: I0130 12:00:31.795796 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 12:00:32 crc kubenswrapper[4703]: I0130 12:00:32.054232 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld" Jan 30 12:00:32 crc kubenswrapper[4703]: I0130 12:00:32.176250 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/178b5fda-ccd1-492b-9d7f-5a44efecaaac-secret-volume\") pod \"178b5fda-ccd1-492b-9d7f-5a44efecaaac\" (UID: \"178b5fda-ccd1-492b-9d7f-5a44efecaaac\") " Jan 30 12:00:32 crc kubenswrapper[4703]: I0130 12:00:32.176343 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gz7f2\" (UniqueName: \"kubernetes.io/projected/178b5fda-ccd1-492b-9d7f-5a44efecaaac-kube-api-access-gz7f2\") pod \"178b5fda-ccd1-492b-9d7f-5a44efecaaac\" (UID: \"178b5fda-ccd1-492b-9d7f-5a44efecaaac\") " Jan 30 12:00:32 crc kubenswrapper[4703]: I0130 12:00:32.176462 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/178b5fda-ccd1-492b-9d7f-5a44efecaaac-config-volume\") pod \"178b5fda-ccd1-492b-9d7f-5a44efecaaac\" (UID: \"178b5fda-ccd1-492b-9d7f-5a44efecaaac\") " Jan 30 12:00:32 crc kubenswrapper[4703]: I0130 12:00:32.177159 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/178b5fda-ccd1-492b-9d7f-5a44efecaaac-config-volume" (OuterVolumeSpecName: "config-volume") pod "178b5fda-ccd1-492b-9d7f-5a44efecaaac" (UID: "178b5fda-ccd1-492b-9d7f-5a44efecaaac"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:00:32 crc kubenswrapper[4703]: I0130 12:00:32.181714 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/178b5fda-ccd1-492b-9d7f-5a44efecaaac-kube-api-access-gz7f2" (OuterVolumeSpecName: "kube-api-access-gz7f2") pod "178b5fda-ccd1-492b-9d7f-5a44efecaaac" (UID: "178b5fda-ccd1-492b-9d7f-5a44efecaaac"). InnerVolumeSpecName "kube-api-access-gz7f2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:00:32 crc kubenswrapper[4703]: I0130 12:00:32.181771 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/178b5fda-ccd1-492b-9d7f-5a44efecaaac-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "178b5fda-ccd1-492b-9d7f-5a44efecaaac" (UID: "178b5fda-ccd1-492b-9d7f-5a44efecaaac"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:00:32 crc kubenswrapper[4703]: I0130 12:00:32.277516 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gz7f2\" (UniqueName: \"kubernetes.io/projected/178b5fda-ccd1-492b-9d7f-5a44efecaaac-kube-api-access-gz7f2\") on node \"crc\" DevicePath \"\"" Jan 30 12:00:32 crc kubenswrapper[4703]: I0130 12:00:32.277552 4703 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/178b5fda-ccd1-492b-9d7f-5a44efecaaac-config-volume\") on node \"crc\" DevicePath \"\"" Jan 30 12:00:32 crc kubenswrapper[4703]: I0130 12:00:32.277561 4703 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/178b5fda-ccd1-492b-9d7f-5a44efecaaac-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 30 12:00:32 crc kubenswrapper[4703]: I0130 12:00:32.800310 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld" Jan 30 12:00:32 crc kubenswrapper[4703]: I0130 12:00:32.800311 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld" event={"ID":"178b5fda-ccd1-492b-9d7f-5a44efecaaac","Type":"ContainerDied","Data":"9eed66f75e4a2a9da1a358d9c26b44e1ce788e6aab532f87c5a630dfa3b6873d"} Jan 30 12:00:32 crc kubenswrapper[4703]: I0130 12:00:32.800383 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9eed66f75e4a2a9da1a358d9c26b44e1ce788e6aab532f87c5a630dfa3b6873d" Jan 30 12:00:33 crc kubenswrapper[4703]: I0130 12:00:33.061787 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 30 12:00:33 crc kubenswrapper[4703]: I0130 12:00:33.190709 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cf6547d4-21f5-4068-b3b2-e2e070b07310-kubelet-dir\") pod \"cf6547d4-21f5-4068-b3b2-e2e070b07310\" (UID: \"cf6547d4-21f5-4068-b3b2-e2e070b07310\") " Jan 30 12:00:33 crc kubenswrapper[4703]: I0130 12:00:33.190781 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cf6547d4-21f5-4068-b3b2-e2e070b07310-kube-api-access\") pod \"cf6547d4-21f5-4068-b3b2-e2e070b07310\" (UID: \"cf6547d4-21f5-4068-b3b2-e2e070b07310\") " Jan 30 12:00:33 crc kubenswrapper[4703]: I0130 12:00:33.190898 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/cf6547d4-21f5-4068-b3b2-e2e070b07310-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "cf6547d4-21f5-4068-b3b2-e2e070b07310" (UID: "cf6547d4-21f5-4068-b3b2-e2e070b07310"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:00:33 crc kubenswrapper[4703]: I0130 12:00:33.191148 4703 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/cf6547d4-21f5-4068-b3b2-e2e070b07310-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 30 12:00:33 crc kubenswrapper[4703]: I0130 12:00:33.195432 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf6547d4-21f5-4068-b3b2-e2e070b07310-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "cf6547d4-21f5-4068-b3b2-e2e070b07310" (UID: "cf6547d4-21f5-4068-b3b2-e2e070b07310"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:00:33 crc kubenswrapper[4703]: I0130 12:00:33.292039 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/cf6547d4-21f5-4068-b3b2-e2e070b07310-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 12:00:33 crc kubenswrapper[4703]: I0130 12:00:33.807142 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"cf6547d4-21f5-4068-b3b2-e2e070b07310","Type":"ContainerDied","Data":"b30c0c32ca961f05d3ec2b498ce908660bfd3c45b0640083f63efc7654b2e2c8"} Jan 30 12:00:33 crc kubenswrapper[4703]: I0130 12:00:33.807197 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b30c0c32ca961f05d3ec2b498ce908660bfd3c45b0640083f63efc7654b2e2c8" Jan 30 12:00:33 crc kubenswrapper[4703]: I0130 12:00:33.807267 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 30 12:00:35 crc kubenswrapper[4703]: I0130 12:00:35.015382 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 12:00:35 crc kubenswrapper[4703]: I0130 12:00:35.015774 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 12:00:35 crc kubenswrapper[4703]: I0130 12:00:35.017882 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 30 12:00:35 crc kubenswrapper[4703]: I0130 12:00:35.017960 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 30 12:00:35 crc kubenswrapper[4703]: I0130 12:00:35.036146 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 12:00:35 crc kubenswrapper[4703]: I0130 12:00:35.051413 
4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 12:00:35 crc kubenswrapper[4703]: I0130 12:00:35.116705 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 12:00:35 crc kubenswrapper[4703]: I0130 12:00:35.116786 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 12:00:35 crc kubenswrapper[4703]: I0130 12:00:35.119089 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 30 12:00:35 crc kubenswrapper[4703]: I0130 12:00:35.129732 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 30 12:00:35 crc kubenswrapper[4703]: I0130 12:00:35.142491 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 12:00:35 crc kubenswrapper[4703]: I0130 12:00:35.142545 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 12:00:35 crc kubenswrapper[4703]: I0130 12:00:35.207778 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 12:00:35 crc kubenswrapper[4703]: I0130 12:00:35.218386 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 30 12:00:35 crc kubenswrapper[4703]: I0130 12:00:35.298659 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 30 12:00:35 crc kubenswrapper[4703]: W0130 12:00:35.852029 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-47d5cac51e6acc7180b6e1f365b846324e78549fa99db6eb1a9352deb90af835 WatchSource:0}: Error finding container 47d5cac51e6acc7180b6e1f365b846324e78549fa99db6eb1a9352deb90af835: Status 404 returned error can't find the container with id 47d5cac51e6acc7180b6e1f365b846324e78549fa99db6eb1a9352deb90af835 Jan 30 12:00:35 crc kubenswrapper[4703]: I0130 12:00:35.854606 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"fd9f77da91209a351083c68329c7d39e1607cd54e2921a4dbb71e5e3897054f8"} Jan 30 12:00:35 crc kubenswrapper[4703]: I0130 12:00:35.855914 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vztsq" event={"ID":"bdfc1d61-7a35-49e6-a091-83226033a80f","Type":"ContainerStarted","Data":"054b0e9a8b0897cd2468cad45bc18e82cd5961cc135f5a8146b588feebe1af01"} Jan 30 12:00:35 crc kubenswrapper[4703]: W0130 12:00:35.861860 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-16491c5bd8b0e640de452158f12be80675b4316fd82e382df05ad11b9aa0900a WatchSource:0}: Error finding container 16491c5bd8b0e640de452158f12be80675b4316fd82e382df05ad11b9aa0900a: Status 404 returned error can't find the container with id 16491c5bd8b0e640de452158f12be80675b4316fd82e382df05ad11b9aa0900a Jan 30 12:00:36 crc kubenswrapper[4703]: I0130 12:00:36.861796 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"55d285544bdbf7d7ad74a373822d12ea4903675e6405f509962ce876e2377022"} Jan 30 12:00:36 crc kubenswrapper[4703]: I0130 12:00:36.862266 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"47d5cac51e6acc7180b6e1f365b846324e78549fa99db6eb1a9352deb90af835"} Jan 30 12:00:36 crc kubenswrapper[4703]: I0130 12:00:36.863839 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"409c69d03810badd465121daed2365b97eb9f87f05729056dc560cd3b94620e8"} Jan 30 12:00:36 crc kubenswrapper[4703]: I0130 12:00:36.863870 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"16491c5bd8b0e640de452158f12be80675b4316fd82e382df05ad11b9aa0900a"} Jan 30 12:00:36 crc kubenswrapper[4703]: I0130 12:00:36.864961 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"f9f025a3e7ca3b388a8aed231009f38417428a64297b47c2b184960f58927ce5"} Jan 30 
12:00:36 crc kubenswrapper[4703]: I0130 12:00:36.865072 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 12:00:36 crc kubenswrapper[4703]: I0130 12:00:36.867039 4703 generic.go:334] "Generic (PLEG): container finished" podID="bdfc1d61-7a35-49e6-a091-83226033a80f" containerID="054b0e9a8b0897cd2468cad45bc18e82cd5961cc135f5a8146b588feebe1af01" exitCode=0 Jan 30 12:00:36 crc kubenswrapper[4703]: I0130 12:00:36.867067 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vztsq" event={"ID":"bdfc1d61-7a35-49e6-a091-83226033a80f","Type":"ContainerDied","Data":"054b0e9a8b0897cd2468cad45bc18e82cd5961cc135f5a8146b588feebe1af01"} Jan 30 12:00:37 crc kubenswrapper[4703]: I0130 12:00:37.358181 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 12:00:37 crc kubenswrapper[4703]: I0130 12:00:37.358233 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 12:00:37 crc kubenswrapper[4703]: I0130 12:00:37.358282 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 12:00:37 crc kubenswrapper[4703]: I0130 12:00:37.358367 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 12:00:40 crc kubenswrapper[4703]: I0130 12:00:40.891336 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vztsq" event={"ID":"bdfc1d61-7a35-49e6-a091-83226033a80f","Type":"ContainerStarted","Data":"5fccfb130259e93a2469508456a1cf451effcd5ea7d5b6db858cae17276192c7"} Jan 30 12:00:40 crc kubenswrapper[4703]: I0130 12:00:40.912255 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vztsq" podStartSLOduration=8.855664785 podStartE2EDuration="1m36.91222295s" podCreationTimestamp="2026-01-30 11:59:04 +0000 UTC" firstStartedPulling="2026-01-30 11:59:11.734876789 +0000 UTC m=+187.512698443" lastFinishedPulling="2026-01-30 12:00:39.791434954 +0000 UTC m=+275.569256608" observedRunningTime="2026-01-30 12:00:40.910104849 +0000 UTC m=+276.687926503" watchObservedRunningTime="2026-01-30 12:00:40.91222295 +0000 UTC m=+276.690044614" Jan 30 12:00:46 crc kubenswrapper[4703]: I0130 12:00:46.102177 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vztsq" Jan 30 12:00:46 crc kubenswrapper[4703]: I0130 12:00:46.102517 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vztsq" Jan 30 12:00:46 crc kubenswrapper[4703]: I0130 12:00:46.107841 
4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m9qpx" event={"ID":"f49515e4-f9bb-4741-a979-5d59fbc7198d","Type":"ContainerStarted","Data":"8677f7b9085b8b9af6a49b39e0813aab117bab21083d4e079489139dd839f441"} Jan 30 12:00:46 crc kubenswrapper[4703]: I0130 12:00:46.109608 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rsvj7" event={"ID":"11176772-9170-499f-8fec-e460709fd300","Type":"ContainerStarted","Data":"5a00c050e997338af2456848c7bbee7ddd83a616324a9054007734a834f5d68b"} Jan 30 12:00:47 crc kubenswrapper[4703]: I0130 12:00:47.604207 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 12:00:47 crc kubenswrapper[4703]: I0130 12:00:47.604475 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 12:00:47 crc kubenswrapper[4703]: I0130 12:00:47.604340 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 12:00:47 crc kubenswrapper[4703]: I0130 12:00:47.604724 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 12:00:48 crc kubenswrapper[4703]: I0130 12:00:48.647255 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vztsq" Jan 30 12:00:48 crc kubenswrapper[4703]: I0130 12:00:48.792487 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vztsq" Jan 30 12:00:49 crc kubenswrapper[4703]: I0130 12:00:49.979637 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-988gc" event={"ID":"4e4915d0-912f-426c-9d74-3d42e36678ed","Type":"ContainerStarted","Data":"041b55e84c7d7db02d9b79578766cbf5fd25f51b1427e3ea9adb7faaa663b309"} Jan 30 12:00:49 crc kubenswrapper[4703]: I0130 12:00:49.982141 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8gf7w" event={"ID":"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63","Type":"ContainerStarted","Data":"4b2f7b5b5fadfb4b826eac54567977d6c51b0a4f2aa7b51017a43548b7c989c3"} Jan 30 12:00:50 crc kubenswrapper[4703]: I0130 12:00:50.211386 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vztsq"] Jan 30 12:00:51 crc kubenswrapper[4703]: I0130 12:00:51.265826 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64ptg" event={"ID":"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa","Type":"ContainerStarted","Data":"a9cb12f005c18cefa1f8d06258958a47cd9dbae77b07b9321f3a63b0c2201f97"} Jan 30 12:00:51 crc kubenswrapper[4703]: I0130 
12:00:51.285073 4703 generic.go:334] "Generic (PLEG): container finished" podID="f49515e4-f9bb-4741-a979-5d59fbc7198d" containerID="8677f7b9085b8b9af6a49b39e0813aab117bab21083d4e079489139dd839f441" exitCode=0 Jan 30 12:00:51 crc kubenswrapper[4703]: I0130 12:00:51.285286 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m9qpx" event={"ID":"f49515e4-f9bb-4741-a979-5d59fbc7198d","Type":"ContainerDied","Data":"8677f7b9085b8b9af6a49b39e0813aab117bab21083d4e079489139dd839f441"} Jan 30 12:00:51 crc kubenswrapper[4703]: I0130 12:00:51.299194 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ngspg" event={"ID":"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436","Type":"ContainerStarted","Data":"de852a391eee3a25e94fda187413572463abd578a30b743a54190e01ba64fb48"} Jan 30 12:00:51 crc kubenswrapper[4703]: I0130 12:00:51.318326 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xzz7n" event={"ID":"61693ed0-e352-4c89-9076-be1acb1a0bfe","Type":"ContainerStarted","Data":"dc891aede133308f7a05c165f64c0f92eb01e9fd7bda3d7232bb3ac9d9fc896b"} Jan 30 12:00:51 crc kubenswrapper[4703]: I0130 12:00:51.318584 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vztsq" podUID="bdfc1d61-7a35-49e6-a091-83226033a80f" containerName="registry-server" containerID="cri-o://5fccfb130259e93a2469508456a1cf451effcd5ea7d5b6db858cae17276192c7" gracePeriod=2 Jan 30 12:00:51 crc kubenswrapper[4703]: I0130 12:00:51.620038 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" podUID="56158b04-1a02-453d-b48a-a107343a3955" containerName="oauth-openshift" containerID="cri-o://dbeece1e92f2d9f2c556e8d001ffd7952f1584801fd06fe35cd04bb77ef31785" gracePeriod=15 Jan 30 12:00:53 crc kubenswrapper[4703]: I0130 12:00:53.953253 4703 generic.go:334] "Generic (PLEG): container finished" podID="56158b04-1a02-453d-b48a-a107343a3955" containerID="dbeece1e92f2d9f2c556e8d001ffd7952f1584801fd06fe35cd04bb77ef31785" exitCode=0 Jan 30 12:00:53 crc kubenswrapper[4703]: I0130 12:00:53.953314 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" event={"ID":"56158b04-1a02-453d-b48a-a107343a3955","Type":"ContainerDied","Data":"dbeece1e92f2d9f2c556e8d001ffd7952f1584801fd06fe35cd04bb77ef31785"} Jan 30 12:00:55 crc kubenswrapper[4703]: I0130 12:00:55.310738 4703 generic.go:334] "Generic (PLEG): container finished" podID="bdfc1d61-7a35-49e6-a091-83226033a80f" containerID="5fccfb130259e93a2469508456a1cf451effcd5ea7d5b6db858cae17276192c7" exitCode=0 Jan 30 12:00:55 crc kubenswrapper[4703]: I0130 12:00:55.310842 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vztsq" event={"ID":"bdfc1d61-7a35-49e6-a091-83226033a80f","Type":"ContainerDied","Data":"5fccfb130259e93a2469508456a1cf451effcd5ea7d5b6db858cae17276192c7"} Jan 30 12:00:56 crc kubenswrapper[4703]: E0130 12:00:56.162607 4703 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5fccfb130259e93a2469508456a1cf451effcd5ea7d5b6db858cae17276192c7 is running failed: container process not found" containerID="5fccfb130259e93a2469508456a1cf451effcd5ea7d5b6db858cae17276192c7" cmd=["grpc_health_probe","-addr=:50051"] Jan 30 12:00:56 crc 
kubenswrapper[4703]: E0130 12:00:56.163500 4703 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5fccfb130259e93a2469508456a1cf451effcd5ea7d5b6db858cae17276192c7 is running failed: container process not found" containerID="5fccfb130259e93a2469508456a1cf451effcd5ea7d5b6db858cae17276192c7" cmd=["grpc_health_probe","-addr=:50051"] Jan 30 12:00:56 crc kubenswrapper[4703]: E0130 12:00:56.164247 4703 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5fccfb130259e93a2469508456a1cf451effcd5ea7d5b6db858cae17276192c7 is running failed: container process not found" containerID="5fccfb130259e93a2469508456a1cf451effcd5ea7d5b6db858cae17276192c7" cmd=["grpc_health_probe","-addr=:50051"] Jan 30 12:00:56 crc kubenswrapper[4703]: E0130 12:00:56.164375 4703 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5fccfb130259e93a2469508456a1cf451effcd5ea7d5b6db858cae17276192c7 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-vztsq" podUID="bdfc1d61-7a35-49e6-a091-83226033a80f" containerName="registry-server" Jan 30 12:00:56 crc kubenswrapper[4703]: I0130 12:00:56.709411 4703 generic.go:334] "Generic (PLEG): container finished" podID="11176772-9170-499f-8fec-e460709fd300" containerID="5a00c050e997338af2456848c7bbee7ddd83a616324a9054007734a834f5d68b" exitCode=0 Jan 30 12:00:56 crc kubenswrapper[4703]: I0130 12:00:56.709776 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rsvj7" event={"ID":"11176772-9170-499f-8fec-e460709fd300","Type":"ContainerDied","Data":"5a00c050e997338af2456848c7bbee7ddd83a616324a9054007734a834f5d68b"} Jan 30 12:00:57 crc kubenswrapper[4703]: I0130 12:00:57.575378 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 12:00:57 crc kubenswrapper[4703]: I0130 12:00:57.575447 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 12:00:57 crc kubenswrapper[4703]: I0130 12:00:57.578658 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 12:00:57 crc kubenswrapper[4703]: I0130 12:00:57.578722 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 12:00:57 crc kubenswrapper[4703]: I0130 12:00:57.794525 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-q7gv4" Jan 30 12:00:57 crc kubenswrapper[4703]: I0130 
12:00:57.795612 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"4158b38180736b651a831dbb8b4516357f58ece630e8a346991757fe510f7ad8"} pod="openshift-console/downloads-7954f5f757-q7gv4" containerMessage="Container download-server failed liveness probe, will be restarted"
Jan 30 12:00:57 crc kubenswrapper[4703]: I0130 12:00:57.795654 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" containerID="cri-o://4158b38180736b651a831dbb8b4516357f58ece630e8a346991757fe510f7ad8" gracePeriod=2
Jan 30 12:00:57 crc kubenswrapper[4703]: I0130 12:00:57.797357 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 12:00:57 crc kubenswrapper[4703]: I0130 12:00:57.797410 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 12:00:59 crc kubenswrapper[4703]: I0130 12:00:59.898281 4703 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-9lxbz container/packageserver namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.21:5443/healthz\": context deadline exceeded" start-of-body=
Jan 30 12:00:59 crc kubenswrapper[4703]: I0130 12:00:59.898465 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-9lxbz" podUID="8879bcc7-3fe8-4982-9d10-18c8c4e9a8ba" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.21:5443/healthz\": context deadline exceeded"
Jan 30 12:01:01 crc kubenswrapper[4703]: I0130 12:01:01.633744 4703 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-d4tv6 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.6:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 30 12:01:01 crc kubenswrapper[4703]: I0130 12:01:01.634114 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" podUID="56158b04-1a02-453d-b48a-a107343a3955" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.6:6443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 30 12:01:01 crc kubenswrapper[4703]: I0130 12:01:01.741722 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" event={"ID":"56158b04-1a02-453d-b48a-a107343a3955","Type":"ContainerDied","Data":"7cfda9553d79f006714959aeb9659721ce71f56eb4acbbfb61560207b16635d7"}
Jan 30 12:01:01 crc kubenswrapper[4703]: I0130 12:01:01.742193 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7cfda9553d79f006714959aeb9659721ce71f56eb4acbbfb61560207b16635d7"
Jan 30 12:01:01 crc kubenswrapper[4703]: I0130 12:01:01.968306 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6"
Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.023142 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/56158b04-1a02-453d-b48a-a107343a3955-audit-dir\") pod \"56158b04-1a02-453d-b48a-a107343a3955\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") "
Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.023218 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-cliconfig\") pod \"56158b04-1a02-453d-b48a-a107343a3955\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") "
Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.023298 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-template-provider-selection\") pod \"56158b04-1a02-453d-b48a-a107343a3955\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") "
Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.023323 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-session\") pod \"56158b04-1a02-453d-b48a-a107343a3955\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") "
Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.023389 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-template-login\") pod \"56158b04-1a02-453d-b48a-a107343a3955\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") "
Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.023432 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-ocp-branding-template\") pod \"56158b04-1a02-453d-b48a-a107343a3955\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") "
Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.023473 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-trusted-ca-bundle\") pod \"56158b04-1a02-453d-b48a-a107343a3955\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") "
Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.023515 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-service-ca\") pod \"56158b04-1a02-453d-b48a-a107343a3955\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") "
Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.023549 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-router-certs\") pod \"56158b04-1a02-453d-b48a-a107343a3955\" (UID: 
\"56158b04-1a02-453d-b48a-a107343a3955\") " Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.023576 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-template-error\") pod \"56158b04-1a02-453d-b48a-a107343a3955\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.023613 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-idp-0-file-data\") pod \"56158b04-1a02-453d-b48a-a107343a3955\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.023646 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-serving-cert\") pod \"56158b04-1a02-453d-b48a-a107343a3955\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.023665 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-audit-policies\") pod \"56158b04-1a02-453d-b48a-a107343a3955\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.023694 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xhth4\" (UniqueName: \"kubernetes.io/projected/56158b04-1a02-453d-b48a-a107343a3955-kube-api-access-xhth4\") pod \"56158b04-1a02-453d-b48a-a107343a3955\" (UID: \"56158b04-1a02-453d-b48a-a107343a3955\") " Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.031317 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "56158b04-1a02-453d-b48a-a107343a3955" (UID: "56158b04-1a02-453d-b48a-a107343a3955"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.031597 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/56158b04-1a02-453d-b48a-a107343a3955-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "56158b04-1a02-453d-b48a-a107343a3955" (UID: "56158b04-1a02-453d-b48a-a107343a3955"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.031688 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56158b04-1a02-453d-b48a-a107343a3955-kube-api-access-xhth4" (OuterVolumeSpecName: "kube-api-access-xhth4") pod "56158b04-1a02-453d-b48a-a107343a3955" (UID: "56158b04-1a02-453d-b48a-a107343a3955"). InnerVolumeSpecName "kube-api-access-xhth4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.032111 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "56158b04-1a02-453d-b48a-a107343a3955" (UID: "56158b04-1a02-453d-b48a-a107343a3955"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.032573 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "56158b04-1a02-453d-b48a-a107343a3955" (UID: "56158b04-1a02-453d-b48a-a107343a3955"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.037380 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "56158b04-1a02-453d-b48a-a107343a3955" (UID: "56158b04-1a02-453d-b48a-a107343a3955"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.038479 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "56158b04-1a02-453d-b48a-a107343a3955" (UID: "56158b04-1a02-453d-b48a-a107343a3955"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.041064 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "56158b04-1a02-453d-b48a-a107343a3955" (UID: "56158b04-1a02-453d-b48a-a107343a3955"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.060990 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vztsq" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.095109 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-ddfb985b5-xwlhn"] Jan 30 12:01:02 crc kubenswrapper[4703]: E0130 12:01:02.103506 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdfc1d61-7a35-49e6-a091-83226033a80f" containerName="extract-utilities" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.103538 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdfc1d61-7a35-49e6-a091-83226033a80f" containerName="extract-utilities" Jan 30 12:01:02 crc kubenswrapper[4703]: E0130 12:01:02.103560 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf6547d4-21f5-4068-b3b2-e2e070b07310" containerName="pruner" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.103573 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf6547d4-21f5-4068-b3b2-e2e070b07310" containerName="pruner" Jan 30 12:01:02 crc kubenswrapper[4703]: E0130 12:01:02.103597 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56158b04-1a02-453d-b48a-a107343a3955" containerName="oauth-openshift" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.103605 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="56158b04-1a02-453d-b48a-a107343a3955" containerName="oauth-openshift" Jan 30 12:01:02 crc kubenswrapper[4703]: E0130 12:01:02.103620 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="178b5fda-ccd1-492b-9d7f-5a44efecaaac" containerName="collect-profiles" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.103633 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="178b5fda-ccd1-492b-9d7f-5a44efecaaac" containerName="collect-profiles" Jan 30 12:01:02 crc kubenswrapper[4703]: E0130 12:01:02.103648 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdfc1d61-7a35-49e6-a091-83226033a80f" containerName="extract-content" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.103660 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdfc1d61-7a35-49e6-a091-83226033a80f" containerName="extract-content" Jan 30 12:01:02 crc kubenswrapper[4703]: E0130 12:01:02.103668 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdfc1d61-7a35-49e6-a091-83226033a80f" containerName="registry-server" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.103682 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdfc1d61-7a35-49e6-a091-83226033a80f" containerName="registry-server" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.884231 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "56158b04-1a02-453d-b48a-a107343a3955" (UID: "56158b04-1a02-453d-b48a-a107343a3955"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.888437 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdfc1d61-7a35-49e6-a091-83226033a80f" containerName="registry-server" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.888477 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="178b5fda-ccd1-492b-9d7f-5a44efecaaac" containerName="collect-profiles" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.888487 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="56158b04-1a02-453d-b48a-a107343a3955" containerName="oauth-openshift" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.888499 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf6547d4-21f5-4068-b3b2-e2e070b07310" containerName="pruner" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.889033 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.900149 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.900618 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.901025 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.901064 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.901079 4703 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.901095 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xhth4\" (UniqueName: \"kubernetes.io/projected/56158b04-1a02-453d-b48a-a107343a3955-kube-api-access-xhth4\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.901108 4703 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/56158b04-1a02-453d-b48a-a107343a3955-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.901131 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.901144 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.949441 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vztsq" event={"ID":"bdfc1d61-7a35-49e6-a091-83226033a80f","Type":"ContainerDied","Data":"06770b6c54c6f7b88cdab254bb1d7993cd49aa4342ae404c92c1e56275a6e2cc"} Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.949507 4703 scope.go:117] "RemoveContainer" containerID="5fccfb130259e93a2469508456a1cf451effcd5ea7d5b6db858cae17276192c7" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.949680 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vztsq" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.960447 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "56158b04-1a02-453d-b48a-a107343a3955" (UID: "56158b04-1a02-453d-b48a-a107343a3955"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:01:02 crc kubenswrapper[4703]: I0130 12:01:02.989049 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-ddfb985b5-xwlhn"] Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.004606 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdfc1d61-7a35-49e6-a091-83226033a80f-utilities\") pod \"bdfc1d61-7a35-49e6-a091-83226033a80f\" (UID: \"bdfc1d61-7a35-49e6-a091-83226033a80f\") " Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.004767 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6tq7t\" (UniqueName: \"kubernetes.io/projected/bdfc1d61-7a35-49e6-a091-83226033a80f-kube-api-access-6tq7t\") pod \"bdfc1d61-7a35-49e6-a091-83226033a80f\" (UID: \"bdfc1d61-7a35-49e6-a091-83226033a80f\") " Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.004833 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdfc1d61-7a35-49e6-a091-83226033a80f-catalog-content\") pod \"bdfc1d61-7a35-49e6-a091-83226033a80f\" (UID: \"bdfc1d61-7a35-49e6-a091-83226033a80f\") " Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.004987 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-session\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.005579 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-service-ca\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc 
kubenswrapper[4703]: I0130 12:01:03.005638 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.005657 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-user-template-error\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.005693 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.005719 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/def3cf42-6aa7-496a-bc35-a465a83a762a-audit-dir\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.005736 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.005753 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vzmmg\" (UniqueName: \"kubernetes.io/projected/def3cf42-6aa7-496a-bc35-a465a83a762a-kube-api-access-vzmmg\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.005793 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.005857 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/def3cf42-6aa7-496a-bc35-a465a83a762a-audit-policies\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: 
\"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.005884 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.005907 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-router-certs\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.005940 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-user-template-login\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.005961 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.005997 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.007360 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "56158b04-1a02-453d-b48a-a107343a3955" (UID: "56158b04-1a02-453d-b48a-a107343a3955"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.014477 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "56158b04-1a02-453d-b48a-a107343a3955" (UID: "56158b04-1a02-453d-b48a-a107343a3955"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.018660 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "56158b04-1a02-453d-b48a-a107343a3955" (UID: "56158b04-1a02-453d-b48a-a107343a3955"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.020306 4703 scope.go:117] "RemoveContainer" containerID="054b0e9a8b0897cd2468cad45bc18e82cd5961cc135f5a8146b588feebe1af01" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.020949 4703 generic.go:334] "Generic (PLEG): container finished" podID="1441806f-c27a-4a93-82e3-123caba174c5" containerID="4158b38180736b651a831dbb8b4516357f58ece630e8a346991757fe510f7ad8" exitCode=0 Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.021098 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-d4tv6" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.021355 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-q7gv4" event={"ID":"1441806f-c27a-4a93-82e3-123caba174c5","Type":"ContainerDied","Data":"4158b38180736b651a831dbb8b4516357f58ece630e8a346991757fe510f7ad8"} Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.024625 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bdfc1d61-7a35-49e6-a091-83226033a80f-utilities" (OuterVolumeSpecName: "utilities") pod "bdfc1d61-7a35-49e6-a091-83226033a80f" (UID: "bdfc1d61-7a35-49e6-a091-83226033a80f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.031774 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bdfc1d61-7a35-49e6-a091-83226033a80f-kube-api-access-6tq7t" (OuterVolumeSpecName: "kube-api-access-6tq7t") pod "bdfc1d61-7a35-49e6-a091-83226033a80f" (UID: "bdfc1d61-7a35-49e6-a091-83226033a80f"). InnerVolumeSpecName "kube-api-access-6tq7t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181035 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/def3cf42-6aa7-496a-bc35-a465a83a762a-audit-policies\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181112 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181156 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-router-certs\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181176 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-user-template-login\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181198 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181218 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-session\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181242 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-service-ca\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181273 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 
12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181292 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-user-template-error\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181316 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181352 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/def3cf42-6aa7-496a-bc35-a465a83a762a-audit-dir\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181370 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181416 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vzmmg\" (UniqueName: \"kubernetes.io/projected/def3cf42-6aa7-496a-bc35-a465a83a762a-kube-api-access-vzmmg\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181442 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181490 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181503 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdfc1d61-7a35-49e6-a091-83226033a80f-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181518 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181531 4703 
reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.181545 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6tq7t\" (UniqueName: \"kubernetes.io/projected/bdfc1d61-7a35-49e6-a091-83226033a80f-kube-api-access-6tq7t\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.182999 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/def3cf42-6aa7-496a-bc35-a465a83a762a-audit-policies\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.183089 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.183178 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/def3cf42-6aa7-496a-bc35-a465a83a762a-audit-dir\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.184442 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.184999 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-service-ca\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.186410 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.187020 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 
12:01:03.188342 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-user-template-error\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.189779 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-router-certs\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.215570 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-system-session\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.219346 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-user-template-login\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.219925 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.246055 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vzmmg\" (UniqueName: \"kubernetes.io/projected/def3cf42-6aa7-496a-bc35-a465a83a762a-kube-api-access-vzmmg\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.545944 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/def3cf42-6aa7-496a-bc35-a465a83a762a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-ddfb985b5-xwlhn\" (UID: \"def3cf42-6aa7-496a-bc35-a465a83a762a\") " pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.563235 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "56158b04-1a02-453d-b48a-a107343a3955" (UID: "56158b04-1a02-453d-b48a-a107343a3955"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.614506 4703 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/56158b04-1a02-453d-b48a-a107343a3955-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.614662 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:03 crc kubenswrapper[4703]: I0130 12:01:03.740562 4703 scope.go:117] "RemoveContainer" containerID="cfb233911ad45347c598184bb4d5d424fe43e22808d6a2f2b0af71deea67b738" Jan 30 12:01:04 crc kubenswrapper[4703]: I0130 12:01:04.029577 4703 generic.go:334] "Generic (PLEG): container finished" podID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" containerID="4b2f7b5b5fadfb4b826eac54567977d6c51b0a4f2aa7b51017a43548b7c989c3" exitCode=0 Jan 30 12:01:04 crc kubenswrapper[4703]: I0130 12:01:04.032027 4703 generic.go:334] "Generic (PLEG): container finished" podID="4e4915d0-912f-426c-9d74-3d42e36678ed" containerID="041b55e84c7d7db02d9b79578766cbf5fd25f51b1427e3ea9adb7faaa663b309" exitCode=0 Jan 30 12:01:04 crc kubenswrapper[4703]: I0130 12:01:04.029636 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8gf7w" event={"ID":"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63","Type":"ContainerDied","Data":"4b2f7b5b5fadfb4b826eac54567977d6c51b0a4f2aa7b51017a43548b7c989c3"} Jan 30 12:01:04 crc kubenswrapper[4703]: I0130 12:01:04.032923 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-988gc" event={"ID":"4e4915d0-912f-426c-9d74-3d42e36678ed","Type":"ContainerDied","Data":"041b55e84c7d7db02d9b79578766cbf5fd25f51b1427e3ea9adb7faaa663b309"} Jan 30 12:01:04 crc kubenswrapper[4703]: I0130 12:01:04.040061 4703 scope.go:117] "RemoveContainer" containerID="7bf15f880cc782893ac898aa58150394186eb0ba2d8ccb1292bea472dbc54b9c" Jan 30 12:01:04 crc kubenswrapper[4703]: I0130 12:01:04.539180 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-d4tv6"] Jan 30 12:01:04 crc kubenswrapper[4703]: I0130 12:01:04.543405 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-d4tv6"] Jan 30 12:01:04 crc kubenswrapper[4703]: I0130 12:01:04.685719 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-ddfb985b5-xwlhn"] Jan 30 12:01:04 crc kubenswrapper[4703]: W0130 12:01:04.709440 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddef3cf42_6aa7_496a_bc35_a465a83a762a.slice/crio-309a76d5566af059227e0a8059d9eed05561129b5467d1d3704ab0489171ac06 WatchSource:0}: Error finding container 309a76d5566af059227e0a8059d9eed05561129b5467d1d3704ab0489171ac06: Status 404 returned error can't find the container with id 309a76d5566af059227e0a8059d9eed05561129b5467d1d3704ab0489171ac06 Jan 30 12:01:04 crc kubenswrapper[4703]: I0130 12:01:04.814586 4703 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 30 12:01:05 crc kubenswrapper[4703]: I0130 12:01:05.059948 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" 
event={"ID":"def3cf42-6aa7-496a-bc35-a465a83a762a","Type":"ContainerStarted","Data":"309a76d5566af059227e0a8059d9eed05561129b5467d1d3704ab0489171ac06"} Jan 30 12:01:05 crc kubenswrapper[4703]: I0130 12:01:05.063472 4703 generic.go:334] "Generic (PLEG): container finished" podID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" containerID="a9cb12f005c18cefa1f8d06258958a47cd9dbae77b07b9321f3a63b0c2201f97" exitCode=0 Jan 30 12:01:05 crc kubenswrapper[4703]: I0130 12:01:05.063514 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64ptg" event={"ID":"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa","Type":"ContainerDied","Data":"a9cb12f005c18cefa1f8d06258958a47cd9dbae77b07b9321f3a63b0c2201f97"} Jan 30 12:01:05 crc kubenswrapper[4703]: I0130 12:01:05.109176 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56158b04-1a02-453d-b48a-a107343a3955" path="/var/lib/kubelet/pods/56158b04-1a02-453d-b48a-a107343a3955/volumes" Jan 30 12:01:05 crc kubenswrapper[4703]: I0130 12:01:05.494000 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bdfc1d61-7a35-49e6-a091-83226033a80f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bdfc1d61-7a35-49e6-a091-83226033a80f" (UID: "bdfc1d61-7a35-49e6-a091-83226033a80f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:01:05 crc kubenswrapper[4703]: I0130 12:01:05.510305 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdfc1d61-7a35-49e6-a091-83226033a80f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:05 crc kubenswrapper[4703]: I0130 12:01:05.679949 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vztsq"] Jan 30 12:01:05 crc kubenswrapper[4703]: I0130 12:01:05.684226 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vztsq"] Jan 30 12:01:06 crc kubenswrapper[4703]: I0130 12:01:06.070055 4703 generic.go:334] "Generic (PLEG): container finished" podID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" containerID="de852a391eee3a25e94fda187413572463abd578a30b743a54190e01ba64fb48" exitCode=0 Jan 30 12:01:06 crc kubenswrapper[4703]: I0130 12:01:06.070146 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ngspg" event={"ID":"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436","Type":"ContainerDied","Data":"de852a391eee3a25e94fda187413572463abd578a30b743a54190e01ba64fb48"} Jan 30 12:01:06 crc kubenswrapper[4703]: I0130 12:01:06.073303 4703 generic.go:334] "Generic (PLEG): container finished" podID="61693ed0-e352-4c89-9076-be1acb1a0bfe" containerID="dc891aede133308f7a05c165f64c0f92eb01e9fd7bda3d7232bb3ac9d9fc896b" exitCode=0 Jan 30 12:01:06 crc kubenswrapper[4703]: I0130 12:01:06.073330 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xzz7n" event={"ID":"61693ed0-e352-4c89-9076-be1acb1a0bfe","Type":"ContainerDied","Data":"dc891aede133308f7a05c165f64c0f92eb01e9fd7bda3d7232bb3ac9d9fc896b"} Jan 30 12:01:06 crc kubenswrapper[4703]: I0130 12:01:06.074581 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" event={"ID":"def3cf42-6aa7-496a-bc35-a465a83a762a","Type":"ContainerStarted","Data":"b189351d8b538de8ecfd2ca9b88255dbd397cb115d70337ac96c2f5884ba3111"} Jan 30 12:01:06 crc 
kubenswrapper[4703]: I0130 12:01:06.075240 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:06 crc kubenswrapper[4703]: I0130 12:01:06.078359 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m9qpx" event={"ID":"f49515e4-f9bb-4741-a979-5d59fbc7198d","Type":"ContainerStarted","Data":"93821994e6d9e59d41559be2ba22c413f31d7797677c1b229d528b1ea61ab79a"} Jan 30 12:01:06 crc kubenswrapper[4703]: I0130 12:01:06.081861 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-q7gv4" event={"ID":"1441806f-c27a-4a93-82e3-123caba174c5","Type":"ContainerStarted","Data":"96f840e8cb551fc375497349fee12f22346eb3199022d35c2c50b8a26f99aba9"} Jan 30 12:01:06 crc kubenswrapper[4703]: I0130 12:01:06.082206 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-q7gv4" Jan 30 12:01:06 crc kubenswrapper[4703]: I0130 12:01:06.082576 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 12:01:06 crc kubenswrapper[4703]: I0130 12:01:06.082622 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 12:01:06 crc kubenswrapper[4703]: I0130 12:01:06.120208 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-m9qpx" podStartSLOduration=8.644893228 podStartE2EDuration="2m2.120184862s" podCreationTimestamp="2026-01-30 11:59:04 +0000 UTC" firstStartedPulling="2026-01-30 11:59:09.425198561 +0000 UTC m=+185.203020215" lastFinishedPulling="2026-01-30 12:01:02.900490195 +0000 UTC m=+298.678311849" observedRunningTime="2026-01-30 12:01:06.119216365 +0000 UTC m=+301.897038029" watchObservedRunningTime="2026-01-30 12:01:06.120184862 +0000 UTC m=+301.898006516" Jan 30 12:01:06 crc kubenswrapper[4703]: I0130 12:01:06.160499 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" podStartSLOduration=40.160476195 podStartE2EDuration="40.160476195s" podCreationTimestamp="2026-01-30 12:00:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:01:06.158509408 +0000 UTC m=+301.936331052" watchObservedRunningTime="2026-01-30 12:01:06.160476195 +0000 UTC m=+301.938297849" Jan 30 12:01:06 crc kubenswrapper[4703]: I0130 12:01:06.649668 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-ddfb985b5-xwlhn" Jan 30 12:01:07 crc kubenswrapper[4703]: I0130 12:01:07.137823 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 12:01:07 crc kubenswrapper[4703]: I0130 12:01:07.137932 4703 prober.go:107] "Probe failed" 
probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 12:01:07 crc kubenswrapper[4703]: I0130 12:01:07.149031 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bdfc1d61-7a35-49e6-a091-83226033a80f" path="/var/lib/kubelet/pods/bdfc1d61-7a35-49e6-a091-83226033a80f/volumes" Jan 30 12:01:07 crc kubenswrapper[4703]: I0130 12:01:07.358046 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 12:01:07 crc kubenswrapper[4703]: I0130 12:01:07.358095 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Jan 30 12:01:07 crc kubenswrapper[4703]: I0130 12:01:07.358158 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 12:01:07 crc kubenswrapper[4703]: I0130 12:01:07.358113 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.321416 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rsvj7" event={"ID":"11176772-9170-499f-8fec-e460709fd300","Type":"ContainerStarted","Data":"a5a7f2010f118c76418a0a48c6bf2f8ec31fb24a941be46c547f68004ee5bfd3"} Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.343529 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rsvj7" podStartSLOduration=6.953690829 podStartE2EDuration="2m8.343510359s" podCreationTimestamp="2026-01-30 11:59:02 +0000 UTC" firstStartedPulling="2026-01-30 11:59:06.960259895 +0000 UTC m=+182.738081549" lastFinishedPulling="2026-01-30 12:01:08.350079415 +0000 UTC m=+304.127901079" observedRunningTime="2026-01-30 12:01:10.341777447 +0000 UTC m=+306.119599111" watchObservedRunningTime="2026-01-30 12:01:10.343510359 +0000 UTC m=+306.121332013" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.373986 4703 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.375372 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.375951 4703 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.376345 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b" gracePeriod=15
Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.376494 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4" gracePeriod=15
Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.376416 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f" gracePeriod=15
Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.376658 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a" gracePeriod=15
Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.376695 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811" gracePeriod=15
Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.377483 4703 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Jan 30 12:01:10 crc kubenswrapper[4703]: E0130 12:01:10.377826 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.377929 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Jan 30 12:01:10 crc kubenswrapper[4703]: E0130 12:01:10.378010 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.378104 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Jan 30 12:01:10 crc kubenswrapper[4703]: E0130 12:01:10.380041 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.380119 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Jan 30 12:01:10 crc 
kubenswrapper[4703]: E0130 12:01:10.380222 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.380308 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 30 12:01:10 crc kubenswrapper[4703]: E0130 12:01:10.380375 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.380430 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 30 12:01:10 crc kubenswrapper[4703]: E0130 12:01:10.380510 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.380566 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 30 12:01:10 crc kubenswrapper[4703]: E0130 12:01:10.380622 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.380700 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 30 12:01:10 crc kubenswrapper[4703]: E0130 12:01:10.380779 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.380834 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.381072 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.381189 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.381267 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.381360 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.381440 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.381511 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.381812 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 30 
12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.393006 4703 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.410826 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.410951 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.411008 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.424029 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.512532 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.512587 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.512609 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.512639 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.512663 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: 
\"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.512686 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.512726 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.512753 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.516432 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.516571 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.516674 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.614454 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.614877 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.614910 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.614949 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.614985 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.615105 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.614632 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.615182 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.615210 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.615239 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: I0130 12:01:10.720998 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 12:01:10 crc kubenswrapper[4703]: W0130 12:01:10.737450 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-9c2329ec1d50fdd433492b78dfcee714e4a82fc92cdd26fde90ae86a7b9d2e0e WatchSource:0}: Error finding container 9c2329ec1d50fdd433492b78dfcee714e4a82fc92cdd26fde90ae86a7b9d2e0e: Status 404 returned error can't find the container with id 9c2329ec1d50fdd433492b78dfcee714e4a82fc92cdd26fde90ae86a7b9d2e0e Jan 30 12:01:10 crc kubenswrapper[4703]: E0130 12:01:10.740011 4703 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.129.56.250:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188f808124fb2879 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 12:01:10.739470457 +0000 UTC m=+306.517292111,LastTimestamp:2026-01-30 12:01:10.739470457 +0000 UTC m=+306.517292111,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 12:01:11 crc kubenswrapper[4703]: I0130 12:01:11.328454 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 30 12:01:11 crc kubenswrapper[4703]: I0130 12:01:11.330864 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 30 12:01:11 crc kubenswrapper[4703]: I0130 12:01:11.331629 4703 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f" exitCode=0 Jan 30 12:01:11 crc kubenswrapper[4703]: I0130 12:01:11.331666 4703 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a" exitCode=0 Jan 30 12:01:11 crc kubenswrapper[4703]: I0130 12:01:11.331674 4703 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4" exitCode=0 Jan 30 12:01:11 crc kubenswrapper[4703]: I0130 12:01:11.331688 4703 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811" exitCode=2 Jan 30 12:01:11 crc kubenswrapper[4703]: I0130 12:01:11.331751 4703 scope.go:117] "RemoveContainer" containerID="173fa73b41a928b25465dddc4d06dc5580a23b7df2156e30745a5b902b637771" Jan 30 12:01:11 crc kubenswrapper[4703]: I0130 
12:01:11.334504 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64ptg" event={"ID":"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa","Type":"ContainerStarted","Data":"b0ccce95a18e7ebdacfe38059008165099e4f3c7a59933364b5fb446bf74072d"} Jan 30 12:01:11 crc kubenswrapper[4703]: I0130 12:01:11.336312 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:11 crc kubenswrapper[4703]: I0130 12:01:11.336552 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:11 crc kubenswrapper[4703]: I0130 12:01:11.340560 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"9c2329ec1d50fdd433492b78dfcee714e4a82fc92cdd26fde90ae86a7b9d2e0e"} Jan 30 12:01:11 crc kubenswrapper[4703]: I0130 12:01:11.341836 4703 generic.go:334] "Generic (PLEG): container finished" podID="69ea44cc-6b13-4b1e-9621-448115bc3090" containerID="d095acdf6ead9d9cf62a498001d5667aaa9e862a52fad9a88d291040ad7ebd64" exitCode=0 Jan 30 12:01:11 crc kubenswrapper[4703]: I0130 12:01:11.341868 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"69ea44cc-6b13-4b1e-9621-448115bc3090","Type":"ContainerDied","Data":"d095acdf6ead9d9cf62a498001d5667aaa9e862a52fad9a88d291040ad7ebd64"} Jan 30 12:01:11 crc kubenswrapper[4703]: I0130 12:01:11.342273 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:11 crc kubenswrapper[4703]: I0130 12:01:11.342445 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:11 crc kubenswrapper[4703]: I0130 12:01:11.342663 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:11 crc kubenswrapper[4703]: E0130 12:01:11.825067 4703 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:11 crc kubenswrapper[4703]: E0130 12:01:11.825562 
4703 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:11 crc kubenswrapper[4703]: E0130 12:01:11.825801 4703 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:11 crc kubenswrapper[4703]: E0130 12:01:11.825969 4703 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:11 crc kubenswrapper[4703]: E0130 12:01:11.826137 4703 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:11 crc kubenswrapper[4703]: I0130 12:01:11.826160 4703 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 30 12:01:11 crc kubenswrapper[4703]: E0130 12:01:11.826317 4703 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.250:6443: connect: connection refused" interval="200ms" Jan 30 12:01:12 crc kubenswrapper[4703]: E0130 12:01:12.205901 4703 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.250:6443: connect: connection refused" interval="400ms" Jan 30 12:01:12 crc kubenswrapper[4703]: I0130 12:01:12.350540 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"67da4d5683eccdc9d7976e29642affa7f65024d170d748a76ca72257916a40c0"} Jan 30 12:01:12 crc kubenswrapper[4703]: I0130 12:01:12.351800 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:12 crc kubenswrapper[4703]: I0130 12:01:12.352266 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:12 crc kubenswrapper[4703]: I0130 12:01:12.352585 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:12 crc kubenswrapper[4703]: I0130 
12:01:12.465268 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rsvj7" Jan 30 12:01:12 crc kubenswrapper[4703]: I0130 12:01:12.465380 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rsvj7" Jan 30 12:01:12 crc kubenswrapper[4703]: E0130 12:01:12.607863 4703 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.250:6443: connect: connection refused" interval="800ms" Jan 30 12:01:13 crc kubenswrapper[4703]: E0130 12:01:13.409205 4703 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.250:6443: connect: connection refused" interval="1.6s" Jan 30 12:01:13 crc kubenswrapper[4703]: I0130 12:01:13.527433 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-rsvj7" podUID="11176772-9170-499f-8fec-e460709fd300" containerName="registry-server" probeResult="failure" output=< Jan 30 12:01:13 crc kubenswrapper[4703]: timeout: failed to connect service ":50051" within 1s Jan 30 12:01:13 crc kubenswrapper[4703]: > Jan 30 12:01:13 crc kubenswrapper[4703]: I0130 12:01:13.674145 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-64ptg" Jan 30 12:01:13 crc kubenswrapper[4703]: I0130 12:01:13.674259 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-64ptg" Jan 30 12:01:13 crc kubenswrapper[4703]: I0130 12:01:13.773551 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 30 12:01:13 crc kubenswrapper[4703]: I0130 12:01:13.774882 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:13 crc kubenswrapper[4703]: I0130 12:01:13.775504 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:13 crc kubenswrapper[4703]: I0130 12:01:13.775858 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:13 crc kubenswrapper[4703]: I0130 12:01:13.861419 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/69ea44cc-6b13-4b1e-9621-448115bc3090-kubelet-dir\") pod \"69ea44cc-6b13-4b1e-9621-448115bc3090\" (UID: \"69ea44cc-6b13-4b1e-9621-448115bc3090\") " Jan 30 12:01:13 crc kubenswrapper[4703]: I0130 12:01:13.861510 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/69ea44cc-6b13-4b1e-9621-448115bc3090-kube-api-access\") pod \"69ea44cc-6b13-4b1e-9621-448115bc3090\" (UID: \"69ea44cc-6b13-4b1e-9621-448115bc3090\") " Jan 30 12:01:13 crc kubenswrapper[4703]: I0130 12:01:13.861553 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/69ea44cc-6b13-4b1e-9621-448115bc3090-var-lock\") pod \"69ea44cc-6b13-4b1e-9621-448115bc3090\" (UID: \"69ea44cc-6b13-4b1e-9621-448115bc3090\") " Jan 30 12:01:13 crc kubenswrapper[4703]: I0130 12:01:13.861596 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69ea44cc-6b13-4b1e-9621-448115bc3090-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "69ea44cc-6b13-4b1e-9621-448115bc3090" (UID: "69ea44cc-6b13-4b1e-9621-448115bc3090"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:01:13 crc kubenswrapper[4703]: I0130 12:01:13.861677 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/69ea44cc-6b13-4b1e-9621-448115bc3090-var-lock" (OuterVolumeSpecName: "var-lock") pod "69ea44cc-6b13-4b1e-9621-448115bc3090" (UID: "69ea44cc-6b13-4b1e-9621-448115bc3090"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:01:13 crc kubenswrapper[4703]: I0130 12:01:13.861752 4703 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/69ea44cc-6b13-4b1e-9621-448115bc3090-var-lock\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:13 crc kubenswrapper[4703]: I0130 12:01:13.861764 4703 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/69ea44cc-6b13-4b1e-9621-448115bc3090-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:13 crc kubenswrapper[4703]: I0130 12:01:13.878623 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69ea44cc-6b13-4b1e-9621-448115bc3090-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "69ea44cc-6b13-4b1e-9621-448115bc3090" (UID: "69ea44cc-6b13-4b1e-9621-448115bc3090"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:01:13 crc kubenswrapper[4703]: I0130 12:01:13.965759 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/69ea44cc-6b13-4b1e-9621-448115bc3090-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.386923 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.386922 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"69ea44cc-6b13-4b1e-9621-448115bc3090","Type":"ContainerDied","Data":"043ef4ef6b71811cdfcfefca34acd3b65c6870f58f724ee1455bc546a0b56899"} Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.387003 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="043ef4ef6b71811cdfcfefca34acd3b65c6870f58f724ee1455bc546a0b56899" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.392570 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.393768 4703 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b" exitCode=0 Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.409662 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.410694 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.411729 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.600165 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.601518 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.602263 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.602800 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.603946 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.604375 4703 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.683653 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.683785 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.683818 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.683834 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.683881 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.683981 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.684242 4703 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.684258 4703 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.684267 4703 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 30 12:01:14 crc kubenswrapper[4703]: I0130 12:01:14.731987 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-64ptg" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" containerName="registry-server" probeResult="failure" output=< Jan 30 12:01:14 crc kubenswrapper[4703]: timeout: failed to connect service ":50051" within 1s Jan 30 12:01:14 crc kubenswrapper[4703]: > Jan 30 12:01:14 crc kubenswrapper[4703]: E0130 12:01:14.927460 4703 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.129.56.250:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188f808124fb2879 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 12:01:10.739470457 +0000 UTC m=+306.517292111,LastTimestamp:2026-01-30 12:01:10.739470457 +0000 UTC m=+306.517292111,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 
UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 30 12:01:15 crc kubenswrapper[4703]: E0130 12:01:15.011932 4703 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.250:6443: connect: connection refused" interval="3.2s" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.091492 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.091858 4703 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.092195 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.092442 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.096433 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.407095 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.410112 4703 scope.go:117] "RemoveContainer" containerID="5c67d60f22420699740f9892d335e74526891a603437dfa57e9e780c8787d73f" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.410192 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.412024 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.413394 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.414423 4703 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.415503 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.420899 4703 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.422058 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.423043 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.424086 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.492755 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.494610 4703 status_manager.go:851] "Failed to get 
status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.495165 4703 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.495533 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.495871 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.496282 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.558576 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-m9qpx" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.655643 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-m9qpx" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.657513 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.658818 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.659882 4703 status_manager.go:851] "Failed to get status for pod" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" pod="openshift-marketplace/redhat-marketplace-m9qpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-m9qpx\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.660410 4703 status_manager.go:851] "Failed 
to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.660858 4703 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.661319 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.683926 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-m9qpx" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.759400 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-m9qpx" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.760449 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.761009 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.761461 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.761962 4703 status_manager.go:851] "Failed to get status for pod" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" pod="openshift-marketplace/redhat-marketplace-m9qpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-m9qpx\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.762553 4703 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.762966 4703 
status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:15 crc kubenswrapper[4703]: I0130 12:01:15.856706 4703 scope.go:117] "RemoveContainer" containerID="57d9bba091f7a93514fa8f4573acefc2ee9c3acbf2729a5e764f7cf14b343d4a"
Jan 30 12:01:16 crc kubenswrapper[4703]: I0130 12:01:16.416471 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Jan 30 12:01:17 crc kubenswrapper[4703]: I0130 12:01:17.358287 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 12:01:17 crc kubenswrapper[4703]: I0130 12:01:17.358348 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 12:01:17 crc kubenswrapper[4703]: I0130 12:01:17.358374 4703 patch_prober.go:28] interesting pod/downloads-7954f5f757-q7gv4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Jan 30 12:01:17 crc kubenswrapper[4703]: I0130 12:01:17.358399 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-q7gv4" podUID="1441806f-c27a-4a93-82e3-123caba174c5" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Jan 30 12:01:17 crc kubenswrapper[4703]: I0130 12:01:17.526058 4703 scope.go:117] "RemoveContainer" containerID="66d985904753c027feec8d9b2e46ac510e5f4845b1a71e04586200d0b140cfc4"
Jan 30 12:01:18 crc kubenswrapper[4703]: E0130 12:01:18.212947 4703 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.250:6443: connect: connection refused" interval="6.4s"
Jan 30 12:01:18 crc kubenswrapper[4703]: I0130 12:01:18.431991 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Jan 30 12:01:19 crc kubenswrapper[4703]: I0130 12:01:19.190424 4703 scope.go:117] "RemoveContainer" containerID="17229c0116cfe2adeac582befbed75033c83cde5fb18b2686b3e8201f98a0811"
Jan 30 12:01:19 crc kubenswrapper[4703]: I0130 12:01:19.209975 4703 scope.go:117] "RemoveContainer" containerID="9b381b8216d72978dc3d0bd93e1ab0dfc62b09544a3895b4191a47fb669ff72b"
Jan 30 12:01:19 crc kubenswrapper[4703]: I0130 12:01:19.272608 4703 scope.go:117] "RemoveContainer" containerID="b386f70d7560e11dfba6ab1477d122d366d93c80e2ad425d4b34ca4c4e054cc2"
Jan 30 12:01:21 crc kubenswrapper[4703]: I0130 12:01:21.087186 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 12:01:21 crc kubenswrapper[4703]: I0130 12:01:21.088641 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:21 crc kubenswrapper[4703]: I0130 12:01:21.089199 4703 status_manager.go:851] "Failed to get status for pod" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" pod="openshift-marketplace/redhat-marketplace-m9qpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-m9qpx\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:21 crc kubenswrapper[4703]: I0130 12:01:21.090319 4703 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:21 crc kubenswrapper[4703]: I0130 12:01:21.091386 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:21 crc kubenswrapper[4703]: I0130 12:01:21.091783 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:21 crc kubenswrapper[4703]: I0130 12:01:21.106263 4703 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="bf2f3ce3-7aeb-4736-a391-655b001a1785"
Jan 30 12:01:21 crc kubenswrapper[4703]: I0130 12:01:21.106303 4703 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="bf2f3ce3-7aeb-4736-a391-655b001a1785"
Jan 30 12:01:21 crc kubenswrapper[4703]: E0130 12:01:21.107077 4703 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 12:01:21 crc kubenswrapper[4703]: I0130 12:01:21.107852 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 12:01:21 crc kubenswrapper[4703]: W0130 12:01:21.131583 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-01f844a9e4b8fef448ea71941c5838a37722c5880faacc90c82d6c5b7875100d WatchSource:0}: Error finding container 01f844a9e4b8fef448ea71941c5838a37722c5880faacc90c82d6c5b7875100d: Status 404 returned error can't find the container with id 01f844a9e4b8fef448ea71941c5838a37722c5880faacc90c82d6c5b7875100d
Jan 30 12:01:21 crc kubenswrapper[4703]: I0130 12:01:21.467083 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"01f844a9e4b8fef448ea71941c5838a37722c5880faacc90c82d6c5b7875100d"}
Jan 30 12:01:21 crc kubenswrapper[4703]: I0130 12:01:21.472783 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8gf7w" event={"ID":"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63","Type":"ContainerStarted","Data":"2c3df39413546964e4c718057c5c9dec2ae14b2ff21a563454e4a895c84b197f"}
Jan 30 12:01:21 crc kubenswrapper[4703]: I0130 12:01:21.477819 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-988gc" event={"ID":"4e4915d0-912f-426c-9d74-3d42e36678ed","Type":"ContainerStarted","Data":"8691c8a86fadd5c5538c351f9612fb44f9fe4406c93e911ea9c3a147c3f582fe"}
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.825913 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ngspg" event={"ID":"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436","Type":"ContainerStarted","Data":"3ad1813ed1716fa3c61d57cb0eb17f0f73685562b8aa6e0a68ceeb80bdb4f3d7"}
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.842107 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.842393 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.842799 4703 status_manager.go:851] "Failed to get status for pod" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" pod="openshift-marketplace/redhat-marketplace-m9qpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-m9qpx\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.843187 4703 status_manager.go:851] "Failed to get status for pod" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" pod="openshift-marketplace/redhat-operators-ngspg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ngspg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.843981 4703 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.844476 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.845973 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xzz7n" event={"ID":"61693ed0-e352-4c89-9076-be1acb1a0bfe","Type":"ContainerStarted","Data":"6e5e476cf7a0129cfa7252f7d13fff89d9f0cc2fd586ec06ae55e1644dbe055b"}
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.846832 4703 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.847152 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.847390 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.847615 4703 status_manager.go:851] "Failed to get status for pod" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" pod="openshift-marketplace/community-operators-988gc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-988gc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.847903 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.848205 4703 status_manager.go:851] "Failed to get status for pod" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" pod="openshift-marketplace/redhat-marketplace-m9qpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-m9qpx\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.848495 4703 status_manager.go:851] "Failed to get status for pod" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" pod="openshift-marketplace/redhat-operators-ngspg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ngspg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.848813 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.849198 4703 status_manager.go:851] "Failed to get status for pod" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" pod="openshift-marketplace/community-operators-988gc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-988gc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.849422 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.849632 4703 status_manager.go:851] "Failed to get status for pod" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" pod="openshift-marketplace/redhat-marketplace-m9qpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-m9qpx\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.850347 4703 status_manager.go:851] "Failed to get status for pod" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" pod="openshift-marketplace/redhat-operators-xzz7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xzz7n\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.851097 4703 status_manager.go:851] "Failed to get status for pod" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" pod="openshift-marketplace/redhat-operators-ngspg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ngspg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.851281 4703 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.851442 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.881230 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rsvj7"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.882594 4703 status_manager.go:851] "Failed to get status for pod" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" pod="openshift-marketplace/redhat-operators-ngspg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ngspg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.883012 4703 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.883650 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.884076 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.884415 4703 status_manager.go:851] "Failed to get status for pod" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" pod="openshift-marketplace/community-operators-988gc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-988gc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.884623 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.884957 4703 status_manager.go:851] "Failed to get status for pod" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" pod="openshift-marketplace/redhat-marketplace-m9qpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-m9qpx\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.885190 4703 status_manager.go:851] "Failed to get status for pod" podUID="11176772-9170-499f-8fec-e460709fd300" pod="openshift-marketplace/certified-operators-rsvj7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-rsvj7\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.886015 4703 status_manager.go:851] "Failed to get status for pod" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" pod="openshift-marketplace/redhat-operators-xzz7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xzz7n\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.931501 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rsvj7"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.932172 4703 status_manager.go:851] "Failed to get status for pod" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" pod="openshift-marketplace/redhat-operators-ngspg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ngspg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.932742 4703 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.933419 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.933751 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.934235 4703 status_manager.go:851] "Failed to get status for pod" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" pod="openshift-marketplace/community-operators-988gc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-988gc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.934569 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.934925 4703 status_manager.go:851] "Failed to get status for pod" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" pod="openshift-marketplace/redhat-marketplace-m9qpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-m9qpx\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.935226 4703 status_manager.go:851] "Failed to get status for pod" podUID="11176772-9170-499f-8fec-e460709fd300" pod="openshift-marketplace/certified-operators-rsvj7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-rsvj7\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:22 crc kubenswrapper[4703]: I0130 12:01:22.935445 4703 status_manager.go:851] "Failed to get status for pod" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" pod="openshift-marketplace/redhat-operators-xzz7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xzz7n\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.013693 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-64ptg"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.014739 4703 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.015456 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.016479 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.017242 4703 status_manager.go:851] "Failed to get status for pod" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" pod="openshift-marketplace/community-operators-988gc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-988gc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.017632 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.018037 4703 status_manager.go:851] "Failed to get status for pod" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" pod="openshift-marketplace/redhat-marketplace-m9qpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-m9qpx\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.022593 4703 status_manager.go:851] "Failed to get status for pod" podUID="11176772-9170-499f-8fec-e460709fd300" pod="openshift-marketplace/certified-operators-rsvj7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-rsvj7\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.027406 4703 status_manager.go:851] "Failed to get status for pod" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" pod="openshift-marketplace/redhat-operators-xzz7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xzz7n\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.028160 4703 status_manager.go:851] "Failed to get status for pod" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" pod="openshift-marketplace/redhat-operators-ngspg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ngspg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.033961 4703 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="86a5fb8f6e45c5e63564717d82fcee2d9d8f1a02fd3e7a4dd011c9c54ff152f1" exitCode=0
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.034071 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"86a5fb8f6e45c5e63564717d82fcee2d9d8f1a02fd3e7a4dd011c9c54ff152f1"}
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.034744 4703 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="bf2f3ce3-7aeb-4736-a391-655b001a1785"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.034799 4703 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="bf2f3ce3-7aeb-4736-a391-655b001a1785"
Jan 30 12:01:24 crc kubenswrapper[4703]: E0130 12:01:24.035278 4703 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.250:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.035340 4703 status_manager.go:851] "Failed to get status for pod" podUID="11176772-9170-499f-8fec-e460709fd300" pod="openshift-marketplace/certified-operators-rsvj7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-rsvj7\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.035758 4703 status_manager.go:851] "Failed to get status for pod" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" pod="openshift-marketplace/redhat-operators-xzz7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xzz7n\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.036055 4703 status_manager.go:851] "Failed to get status for pod" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" pod="openshift-marketplace/redhat-operators-ngspg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ngspg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.037112 4703 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.037521 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.037952 4703 status_manager.go:851] "Failed to get status for pod" podUID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" pod="openshift-marketplace/community-operators-8gf7w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8gf7w\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.038176 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.038324 4703 status_manager.go:851] "Failed to get status for pod" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" pod="openshift-marketplace/community-operators-988gc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-988gc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.038540 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.038717 4703 status_manager.go:851] "Failed to get status for pod" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" pod="openshift-marketplace/redhat-marketplace-m9qpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-m9qpx\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.038972 4703 status_manager.go:851] "Failed to get status for pod" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" pod="openshift-marketplace/redhat-operators-xzz7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xzz7n\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.039115 4703 status_manager.go:851] "Failed to get status for pod" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" pod="openshift-marketplace/redhat-operators-ngspg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ngspg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.039390 4703 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.039852 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.040629 4703 status_manager.go:851] "Failed to get status for pod" podUID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" pod="openshift-marketplace/community-operators-8gf7w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8gf7w\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.040809 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.040939 4703 status_manager.go:851] "Failed to get status for pod" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" pod="openshift-marketplace/community-operators-988gc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-988gc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.041077 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.041341 4703 status_manager.go:851] "Failed to get status for pod" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" pod="openshift-marketplace/redhat-marketplace-m9qpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-m9qpx\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.041495 4703 status_manager.go:851] "Failed to get status for pod" podUID="11176772-9170-499f-8fec-e460709fd300" pod="openshift-marketplace/certified-operators-rsvj7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-rsvj7\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.106186 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-64ptg"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.106945 4703 status_manager.go:851] "Failed to get status for pod" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" pod="openshift-marketplace/redhat-operators-ngspg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ngspg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.107621 4703 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.108207 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.108570 4703 status_manager.go:851] "Failed to get status for pod" podUID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" pod="openshift-marketplace/community-operators-8gf7w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8gf7w\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.108960 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.111524 4703 status_manager.go:851] "Failed to get status for pod" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" pod="openshift-marketplace/community-operators-988gc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-988gc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.111945 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.112352 4703 status_manager.go:851] "Failed to get status for pod" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" pod="openshift-marketplace/redhat-marketplace-m9qpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-m9qpx\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.112666 4703 status_manager.go:851] "Failed to get status for pod" podUID="11176772-9170-499f-8fec-e460709fd300" pod="openshift-marketplace/certified-operators-rsvj7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-rsvj7\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.112985 4703 status_manager.go:851] "Failed to get status for pod" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" pod="openshift-marketplace/redhat-operators-xzz7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xzz7n\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:24 crc kubenswrapper[4703]: E0130 12:01:24.614032 4703 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.250:6443: connect: connection refused" interval="7s"
Jan 30 12:01:24 crc kubenswrapper[4703]: E0130 12:01:24.928910 4703 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.129.56.250:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.188f808124fb2879 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-30 12:01:10.739470457 +0000 UTC m=+306.517292111,LastTimestamp:2026-01-30 12:01:10.739470457 +0000 UTC m=+306.517292111,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.952558 4703 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Readiness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body=
Jan 30 12:01:24 crc kubenswrapper[4703]: I0130 12:01:24.952665 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.042729 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.042789 4703 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29" exitCode=1
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.043177 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29"}
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.044288 4703 scope.go:117] "RemoveContainer" containerID="808ecf7e7d4a68df78a1018d23d44064588a4e9aae2a0e3c50ef657080684c29"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.044537 4703 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.044944 4703 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.045201 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.045433 4703 status_manager.go:851] "Failed to get status for pod" podUID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" pod="openshift-marketplace/community-operators-8gf7w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8gf7w\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.045712 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.045925 4703 status_manager.go:851] "Failed to get status for pod" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" pod="openshift-marketplace/community-operators-988gc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-988gc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.046160 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.046601 4703 status_manager.go:851] "Failed to get status for pod" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" pod="openshift-marketplace/redhat-marketplace-m9qpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-m9qpx\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.047290 4703 status_manager.go:851] "Failed to get status for pod" podUID="11176772-9170-499f-8fec-e460709fd300" pod="openshift-marketplace/certified-operators-rsvj7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-rsvj7\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.047786 4703 status_manager.go:851] "Failed to get status for pod" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" pod="openshift-marketplace/redhat-operators-xzz7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xzz7n\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.048274 4703 status_manager.go:851] "Failed to get status for pod" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" pod="openshift-marketplace/redhat-operators-ngspg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ngspg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.092035 4703 status_manager.go:851] "Failed to get status for pod" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" pod="openshift-marketplace/community-operators-988gc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-988gc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.093511 4703 status_manager.go:851] "Failed to get status for pod" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.093876 4703 status_manager.go:851] "Failed to get status for pod" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" pod="openshift-marketplace/redhat-marketplace-m9qpx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-m9qpx\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.094104 4703 status_manager.go:851] "Failed to get status for pod" podUID="11176772-9170-499f-8fec-e460709fd300" pod="openshift-marketplace/certified-operators-rsvj7" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-rsvj7\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.094349 4703 status_manager.go:851] "Failed to get status for pod" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" pod="openshift-marketplace/redhat-operators-xzz7n" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-xzz7n\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.094576 4703 status_manager.go:851] "Failed to get status for pod" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" pod="openshift-marketplace/redhat-operators-ngspg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ngspg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.095853 4703 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.096158 4703 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.096322 4703 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.096483 4703 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.096624 4703 status_manager.go:851] "Failed to get status for pod" podUID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" pod="openshift-marketplace/community-operators-8gf7w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-8gf7w\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:25 crc kubenswrapper[4703]: I0130 12:01:25.096848 4703 status_manager.go:851] "Failed to get status for pod" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" pod="openshift-marketplace/certified-operators-64ptg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-64ptg\": dial tcp 38.129.56.250:6443: connect: connection refused"
Jan 30 12:01:26 crc kubenswrapper[4703]: I0130 12:01:26.051403 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5fb52c075c03c1d92b3d5607a473bf3c4d69f908b9747d91c63b0c66c3f40f6c"}
Jan 30 12:01:26 crc kubenswrapper[4703]: I0130 12:01:26.056047 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Jan 30 12:01:26 crc kubenswrapper[4703]: I0130 12:01:26.056178 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"63202f68bc6bb7520ed1f6eade96483ee805bac30c1c75c735a5dd296be25df3"}
Jan 30 12:01:26 crc kubenswrapper[4703]: I0130 12:01:26.075904 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ngspg"
Jan 30 12:01:26 crc kubenswrapper[4703]: I0130 12:01:26.076030 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ngspg"
Jan 30 12:01:26 crc kubenswrapper[4703]: I0130 12:01:26.699696 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xzz7n"
Jan 30 12:01:26 crc kubenswrapper[4703]: I0130 12:01:26.699839 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xzz7n"
Jan 30 12:01:27 crc kubenswrapper[4703]: I0130 12:01:27.138169 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ngspg" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" containerName="registry-server" probeResult="failure" output=<
Jan 30 12:01:27 crc kubenswrapper[4703]: timeout: failed to connect service ":50051" within 1s
Jan 30 12:01:27 crc kubenswrapper[4703]: >
Jan 30 12:01:27 crc kubenswrapper[4703]: I0130 12:01:27.148798 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2897f7420a62b1873cadc269104d1512fd56cdc594b09e7ba98f8f56cde5cc01"}
Jan 30 12:01:27 crc kubenswrapper[4703]: I0130 12:01:27.148861 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5aa1c74959b51541c2763c97af03f8563698c107a6f2874216b36cdcb144933e"}
Jan 30 12:01:27 crc kubenswrapper[4703]: I0130 12:01:27.409359 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-q7gv4"
Jan 30 12:01:27 crc kubenswrapper[4703]: I0130 12:01:27.930482 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xzz7n" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" containerName="registry-server" probeResult="failure" output=<
Jan 30 12:01:27 crc kubenswrapper[4703]: timeout: failed to connect service ":50051" within 1s
Jan 30 12:01:27 crc kubenswrapper[4703]: >
Jan 30 12:01:28 crc kubenswrapper[4703]: I0130 12:01:28.158236 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"11f40685a4ecfeddc9bc041463a975cafb4c59f3712a18e46a2e0ce3f29c2a61"}
Jan 30 12:01:28 crc kubenswrapper[4703]: I0130 12:01:28.158285 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e008a773945c1a847f946a70f7be32065168a2f60840775fd293454d25ea0798"}
Jan 30 12:01:28 crc kubenswrapper[4703]: I0130 12:01:28.158597 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 12:01:28 crc kubenswrapper[4703]: I0130 12:01:28.158799 4703 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="bf2f3ce3-7aeb-4736-a391-655b001a1785"
Jan 30 12:01:28 crc kubenswrapper[4703]: I0130 12:01:28.158841 4703 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="bf2f3ce3-7aeb-4736-a391-655b001a1785"
Jan 30 12:01:29 crc kubenswrapper[4703]: I0130 12:01:29.353458 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 30 12:01:29 crc kubenswrapper[4703]: I0130 12:01:29.353590 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 30 12:01:29 crc kubenswrapper[4703]: I0130 12:01:29.354508 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 30 12:01:31 crc kubenswrapper[4703]: I0130 12:01:31.108787 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 12:01:31 crc kubenswrapper[4703]: I0130 12:01:31.109210 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 12:01:31 crc kubenswrapper[4703]: I0130 12:01:31.113845 4703 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]log ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]etcd ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/start-apiserver-admission-initializer ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/openshift.io-api-request-count-filter ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/openshift.io-startkubeinformers ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/generic-apiserver-start-informers ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/priority-and-fairness-config-consumer ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/priority-and-fairness-filter ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/storage-object-count-tracker-hook ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/start-apiextensions-informers ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/start-apiextensions-controllers ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/crd-informer-synced ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/start-system-namespaces-controller ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/start-cluster-authentication-info-controller ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/start-legacy-token-tracking-controller ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/start-service-ip-repair-controllers ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/scheduling/bootstrap-system-priority-classes ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/priority-and-fairness-config-producer ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/bootstrap-controller ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/aggregator-reload-proxy-client-cert ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/start-kube-aggregator-informers ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/apiservice-status-local-available-controller ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/apiservice-status-remote-available-controller ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/apiservice-registration-controller ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/apiservice-wait-for-first-sync ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/apiservice-discovery-controller ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/kube-apiserver-autoregistration ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]autoregister-completion ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/apiservice-openapi-controller ok
Jan 30 12:01:31 crc kubenswrapper[4703]: [+]poststarthook/apiservice-openapiv3-controller ok
Jan 30 12:01:31 crc kubenswrapper[4703]: livez check failed
Jan 30 12:01:31 crc kubenswrapper[4703]: I0130 12:01:31.115426 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 30 12:01:32 crc kubenswrapper[4703]: I0130 12:01:32.562834 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-988gc"
Jan 30 12:01:32 crc kubenswrapper[4703]: I0130 12:01:32.563430 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-988gc"
Jan 30 12:01:32 crc kubenswrapper[4703]: I0130 12:01:32.666392 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-988gc"
Jan 30 12:01:32 crc kubenswrapper[4703]: I0130 12:01:32.910865 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8gf7w"
Jan 30 12:01:32 crc kubenswrapper[4703]: I0130 12:01:32.910924 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8gf7w"
Jan 30 12:01:32 crc kubenswrapper[4703]: I0130 12:01:32.975772 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8gf7w"
Jan 30 12:01:33 crc kubenswrapper[4703]: I0130 12:01:33.467657 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-988gc"
Jan 30 12:01:33 crc kubenswrapper[4703]: I0130 12:01:33.478756 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8gf7w"
Jan 30 12:01:33 crc kubenswrapper[4703]: I0130 12:01:33.940524 4703 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 30 12:01:34 crc kubenswrapper[4703]: I0130 12:01:34.222722 4703 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="ea47f35d-97f8-4a6e-9bf7-89b57fbe265f"
Jan 30 12:01:34 crc kubenswrapper[4703]: I0130 12:01:34.402312 4703 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="bf2f3ce3-7aeb-4736-a391-655b001a1785"
Jan 30 12:01:34 crc kubenswrapper[4703]: I0130 12:01:34.402343 4703 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="bf2f3ce3-7aeb-4736-a391-655b001a1785"
Jan 30 12:01:34 crc kubenswrapper[4703]: I0130 12:01:34.520856 4703 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="ea47f35d-97f8-4a6e-9bf7-89b57fbe265f"
Jan 30 12:01:36 crc kubenswrapper[4703]: I0130 12:01:36.135433 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ngspg"
Jan 30 12:01:36 crc kubenswrapper[4703]: I0130 12:01:36.179374 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ngspg"
Jan 30 12:01:36 crc kubenswrapper[4703]: I0130 12:01:36.750742 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xzz7n"
Jan 30 12:01:36 crc kubenswrapper[4703]: I0130 12:01:36.795308 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xzz7n"
Jan 30 12:01:44 crc kubenswrapper[4703]: I0130 12:01:44.599849 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Jan 30 12:01:44 crc kubenswrapper[4703]: I0130 12:01:44.956165 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 30 12:01:44 crc kubenswrapper[4703]: I0130 12:01:44.985026 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert"
Jan 30 12:01:45 crc kubenswrapper[4703]: I0130 12:01:45.542334 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Jan 30 12:01:45 crc kubenswrapper[4703]: I0130 12:01:45.574369 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Jan 30 12:01:45 crc kubenswrapper[4703]: I0130 12:01:45.576728 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Jan 30 12:01:45 crc kubenswrapper[4703]: I0130 12:01:45.595404 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Jan 30 12:01:45 crc kubenswrapper[4703]: I0130 12:01:45.915439 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Jan 30 12:01:45 crc kubenswrapper[4703]: I0130 12:01:45.919500 4703 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Jan 30 12:01:46 crc kubenswrapper[4703]: I0130 12:01:46.035764 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Jan 30 12:01:46 crc kubenswrapper[4703]: I0130 12:01:46.110641 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Jan 30 12:01:46 crc kubenswrapper[4703]: I0130 12:01:46.166530 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 30 12:01:46 crc kubenswrapper[4703]: I0130 12:01:46.249736 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Jan 30 12:01:46 crc
kubenswrapper[4703]: I0130 12:01:46.745246 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 30 12:01:46 crc kubenswrapper[4703]: I0130 12:01:46.824476 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 30 12:01:46 crc kubenswrapper[4703]: I0130 12:01:46.963601 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 30 12:01:47 crc kubenswrapper[4703]: I0130 12:01:47.069775 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 30 12:01:47 crc kubenswrapper[4703]: I0130 12:01:47.159747 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 30 12:01:47 crc kubenswrapper[4703]: I0130 12:01:47.226716 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 30 12:01:47 crc kubenswrapper[4703]: I0130 12:01:47.324441 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 30 12:01:47 crc kubenswrapper[4703]: I0130 12:01:47.339465 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 30 12:01:47 crc kubenswrapper[4703]: I0130 12:01:47.544988 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 30 12:01:47 crc kubenswrapper[4703]: I0130 12:01:47.586476 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 30 12:01:47 crc kubenswrapper[4703]: I0130 12:01:47.614976 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 30 12:01:47 crc kubenswrapper[4703]: I0130 12:01:47.639949 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 30 12:01:47 crc kubenswrapper[4703]: I0130 12:01:47.713722 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 30 12:01:47 crc kubenswrapper[4703]: I0130 12:01:47.745230 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 30 12:01:47 crc kubenswrapper[4703]: I0130 12:01:47.862922 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 30 12:01:47 crc kubenswrapper[4703]: I0130 12:01:47.973505 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 30 12:01:47 crc kubenswrapper[4703]: I0130 12:01:47.991602 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.219700 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.273944 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 30 12:01:48 crc 
kubenswrapper[4703]: I0130 12:01:48.353708 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.360072 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.387206 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.447941 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.510526 4703 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.513847 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.522677 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.580173 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.621874 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.643904 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.654249 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.661618 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.683566 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.701914 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.713330 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.765158 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.834177 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.884188 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.965604 4703 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 30 
12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.966461 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ngspg" podStartSLOduration=39.860754948 podStartE2EDuration="2m43.966435339s" podCreationTimestamp="2026-01-30 11:59:05 +0000 UTC" firstStartedPulling="2026-01-30 11:59:11.750306587 +0000 UTC m=+187.528128241" lastFinishedPulling="2026-01-30 12:01:15.855986978 +0000 UTC m=+311.633808632" observedRunningTime="2026-01-30 12:01:34.216751323 +0000 UTC m=+329.994572997" watchObservedRunningTime="2026-01-30 12:01:48.966435339 +0000 UTC m=+344.744256983" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.967331 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=38.967323857 podStartE2EDuration="38.967323857s" podCreationTimestamp="2026-01-30 12:01:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:01:34.280231765 +0000 UTC m=+330.058053429" watchObservedRunningTime="2026-01-30 12:01:48.967323857 +0000 UTC m=+344.745145511" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.967416 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-64ptg" podStartSLOduration=45.934810297 podStartE2EDuration="2m46.96740931s" podCreationTimestamp="2026-01-30 11:59:02 +0000 UTC" firstStartedPulling="2026-01-30 11:59:09.121825824 +0000 UTC m=+184.899647478" lastFinishedPulling="2026-01-30 12:01:10.154424837 +0000 UTC m=+305.932246491" observedRunningTime="2026-01-30 12:01:34.355756904 +0000 UTC m=+330.133578548" watchObservedRunningTime="2026-01-30 12:01:48.96740931 +0000 UTC m=+344.745230974" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.967639 4703 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.969176 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-8gf7w" podStartSLOduration=37.21141465 podStartE2EDuration="2m46.969168525s" podCreationTimestamp="2026-01-30 11:59:02 +0000 UTC" firstStartedPulling="2026-01-30 11:59:09.425059027 +0000 UTC m=+185.202880681" lastFinishedPulling="2026-01-30 12:01:19.182812902 +0000 UTC m=+314.960634556" observedRunningTime="2026-01-30 12:01:34.297946989 +0000 UTC m=+330.075768643" watchObservedRunningTime="2026-01-30 12:01:48.969168525 +0000 UTC m=+344.746990179" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.969934 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xzz7n" podStartSLOduration=36.557090064 podStartE2EDuration="2m43.969925329s" podCreationTimestamp="2026-01-30 11:59:05 +0000 UTC" firstStartedPulling="2026-01-30 11:59:11.769093731 +0000 UTC m=+187.546915385" lastFinishedPulling="2026-01-30 12:01:19.181928996 +0000 UTC m=+314.959750650" observedRunningTime="2026-01-30 12:01:34.456042486 +0000 UTC m=+330.233864150" watchObservedRunningTime="2026-01-30 12:01:48.969925329 +0000 UTC m=+344.747746983" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.987291 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-988gc" podStartSLOduration=42.219527637 podStartE2EDuration="2m47.987251519s" 
podCreationTimestamp="2026-01-30 11:59:01 +0000 UTC" firstStartedPulling="2026-01-30 11:59:07.949752162 +0000 UTC m=+183.727573816" lastFinishedPulling="2026-01-30 12:01:13.717476044 +0000 UTC m=+309.495297698" observedRunningTime="2026-01-30 12:01:34.377489923 +0000 UTC m=+330.155311577" watchObservedRunningTime="2026-01-30 12:01:48.987251519 +0000 UTC m=+344.765073173" Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.988653 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.988750 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 30 12:01:48 crc kubenswrapper[4703]: I0130 12:01:48.995322 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 12:01:49 crc kubenswrapper[4703]: I0130 12:01:49.017989 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=16.017965589 podStartE2EDuration="16.017965589s" podCreationTimestamp="2026-01-30 12:01:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:01:49.015730569 +0000 UTC m=+344.793552233" watchObservedRunningTime="2026-01-30 12:01:49.017965589 +0000 UTC m=+344.795787293" Jan 30 12:01:49 crc kubenswrapper[4703]: I0130 12:01:49.045083 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 30 12:01:49 crc kubenswrapper[4703]: I0130 12:01:49.096069 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 30 12:01:49 crc kubenswrapper[4703]: I0130 12:01:49.159447 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 30 12:01:49 crc kubenswrapper[4703]: I0130 12:01:49.365747 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 30 12:01:49 crc kubenswrapper[4703]: I0130 12:01:49.387391 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 30 12:01:49 crc kubenswrapper[4703]: I0130 12:01:49.502896 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 30 12:01:49 crc kubenswrapper[4703]: I0130 12:01:49.521313 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 30 12:01:49 crc kubenswrapper[4703]: I0130 12:01:49.561378 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 30 12:01:49 crc kubenswrapper[4703]: I0130 12:01:49.640304 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 30 12:01:49 crc kubenswrapper[4703]: I0130 12:01:49.717105 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 30 12:01:49 crc kubenswrapper[4703]: I0130 12:01:49.725751 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 30 12:01:49 crc 
kubenswrapper[4703]: I0130 12:01:49.789854 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 30 12:01:50 crc kubenswrapper[4703]: I0130 12:01:50.068847 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 30 12:01:50 crc kubenswrapper[4703]: I0130 12:01:50.107644 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 30 12:01:50 crc kubenswrapper[4703]: I0130 12:01:50.251947 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 30 12:01:50 crc kubenswrapper[4703]: I0130 12:01:50.254869 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 30 12:01:50 crc kubenswrapper[4703]: I0130 12:01:50.313633 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 30 12:01:50 crc kubenswrapper[4703]: I0130 12:01:50.325186 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 30 12:01:50 crc kubenswrapper[4703]: I0130 12:01:50.358369 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 30 12:01:50 crc kubenswrapper[4703]: I0130 12:01:50.368409 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 30 12:01:50 crc kubenswrapper[4703]: I0130 12:01:50.552832 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 30 12:01:50 crc kubenswrapper[4703]: I0130 12:01:50.568278 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 30 12:01:50 crc kubenswrapper[4703]: I0130 12:01:50.688447 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 30 12:01:50 crc kubenswrapper[4703]: I0130 12:01:50.821016 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 30 12:01:50 crc kubenswrapper[4703]: I0130 12:01:50.943519 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 30 12:01:50 crc kubenswrapper[4703]: I0130 12:01:50.945842 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 30 12:01:50 crc kubenswrapper[4703]: I0130 12:01:50.947466 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 30 12:01:50 crc kubenswrapper[4703]: I0130 12:01:50.973999 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 30 12:01:50 crc kubenswrapper[4703]: I0130 12:01:50.992198 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 30 12:01:51 crc kubenswrapper[4703]: I0130 12:01:51.071193 4703 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 30 12:01:51 crc kubenswrapper[4703]: I0130 12:01:51.115118 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 12:01:51 crc kubenswrapper[4703]: I0130 12:01:51.120871 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 30 12:01:51 crc kubenswrapper[4703]: I0130 12:01:51.137217 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 30 12:01:51 crc kubenswrapper[4703]: I0130 12:01:51.176374 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 30 12:01:51 crc kubenswrapper[4703]: I0130 12:01:51.182070 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 30 12:01:51 crc kubenswrapper[4703]: I0130 12:01:51.202647 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 30 12:01:51 crc kubenswrapper[4703]: I0130 12:01:51.300951 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 30 12:01:51 crc kubenswrapper[4703]: I0130 12:01:51.301748 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 30 12:01:51 crc kubenswrapper[4703]: I0130 12:01:51.487044 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 30 12:01:51 crc kubenswrapper[4703]: I0130 12:01:51.694022 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 30 12:01:51 crc kubenswrapper[4703]: I0130 12:01:51.727280 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 30 12:01:51 crc kubenswrapper[4703]: I0130 12:01:51.759839 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 30 12:01:51 crc kubenswrapper[4703]: I0130 12:01:51.816988 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 30 12:01:51 crc kubenswrapper[4703]: I0130 12:01:51.846378 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 30 12:01:51 crc kubenswrapper[4703]: I0130 12:01:51.910192 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 30 12:01:51 crc kubenswrapper[4703]: I0130 12:01:51.999476 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.000315 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.001273 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.006852 4703 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.045470 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.082604 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.099374 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.183702 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.243344 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.284580 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.311281 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.356428 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.379846 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.431954 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.486902 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.632495 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.949781 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.950361 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.950876 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.950965 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.951154 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.952309 4703 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.952465 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.952572 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.958807 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.996669 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 30 12:01:52 crc kubenswrapper[4703]: I0130 12:01:52.996669 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 30 12:01:53 crc kubenswrapper[4703]: I0130 12:01:53.016639 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 30 12:01:53 crc kubenswrapper[4703]: I0130 12:01:53.125659 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 30 12:01:53 crc kubenswrapper[4703]: I0130 12:01:53.183337 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 30 12:01:53 crc kubenswrapper[4703]: I0130 12:01:53.195151 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 30 12:01:53 crc kubenswrapper[4703]: I0130 12:01:53.245479 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 30 12:01:53 crc kubenswrapper[4703]: I0130 12:01:53.346143 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 30 12:01:53 crc kubenswrapper[4703]: I0130 12:01:53.406435 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 30 12:01:53 crc kubenswrapper[4703]: I0130 12:01:53.522194 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 30 12:01:53 crc kubenswrapper[4703]: I0130 12:01:53.545177 4703 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 30 12:01:53 crc kubenswrapper[4703]: I0130 12:01:53.558732 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 30 12:01:53 crc kubenswrapper[4703]: I0130 12:01:53.616280 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 30 12:01:53 crc kubenswrapper[4703]: I0130 12:01:53.631860 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 30 12:01:53 crc kubenswrapper[4703]: I0130 12:01:53.738534 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 30 12:01:53 crc kubenswrapper[4703]: I0130 12:01:53.782400 4703 reflector.go:368] Caches populated for *v1.Secret 
from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 30 12:01:53 crc kubenswrapper[4703]: I0130 12:01:53.942807 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 30 12:01:53 crc kubenswrapper[4703]: I0130 12:01:53.975389 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 30 12:01:54 crc kubenswrapper[4703]: I0130 12:01:54.030481 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 30 12:01:54 crc kubenswrapper[4703]: I0130 12:01:54.031811 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 30 12:01:54 crc kubenswrapper[4703]: I0130 12:01:54.071417 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 30 12:01:54 crc kubenswrapper[4703]: I0130 12:01:54.092221 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 30 12:01:54 crc kubenswrapper[4703]: I0130 12:01:54.093870 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 30 12:01:54 crc kubenswrapper[4703]: I0130 12:01:54.163866 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 30 12:01:54 crc kubenswrapper[4703]: I0130 12:01:54.169892 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 30 12:01:54 crc kubenswrapper[4703]: I0130 12:01:54.208441 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 30 12:01:54 crc kubenswrapper[4703]: I0130 12:01:54.267998 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 30 12:01:54 crc kubenswrapper[4703]: I0130 12:01:54.274313 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 30 12:01:54 crc kubenswrapper[4703]: I0130 12:01:54.355237 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 30 12:01:54 crc kubenswrapper[4703]: I0130 12:01:54.488472 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 30 12:01:54 crc kubenswrapper[4703]: I0130 12:01:54.502447 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 30 12:01:54 crc kubenswrapper[4703]: I0130 12:01:54.509404 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 30 12:01:54 crc kubenswrapper[4703]: I0130 12:01:54.656596 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 30 12:01:54 crc kubenswrapper[4703]: I0130 12:01:54.659740 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 30 12:01:54 crc kubenswrapper[4703]: I0130 12:01:54.926329 4703 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 30 12:01:54 crc kubenswrapper[4703]: I0130 12:01:54.947326 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 30 12:01:55 crc kubenswrapper[4703]: I0130 12:01:55.281279 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 30 12:01:55 crc kubenswrapper[4703]: I0130 12:01:55.281526 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 30 12:01:55 crc kubenswrapper[4703]: I0130 12:01:55.281690 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 30 12:01:55 crc kubenswrapper[4703]: I0130 12:01:55.281848 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 30 12:01:55 crc kubenswrapper[4703]: I0130 12:01:55.281989 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 30 12:01:55 crc kubenswrapper[4703]: I0130 12:01:55.282194 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 30 12:01:55 crc kubenswrapper[4703]: I0130 12:01:55.312781 4703 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 30 12:01:55 crc kubenswrapper[4703]: I0130 12:01:55.354090 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 30 12:01:55 crc kubenswrapper[4703]: I0130 12:01:55.455776 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 30 12:01:55 crc kubenswrapper[4703]: I0130 12:01:55.482010 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 30 12:01:55 crc kubenswrapper[4703]: I0130 12:01:55.593893 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 30 12:01:55 crc kubenswrapper[4703]: I0130 12:01:55.690963 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 30 12:01:55 crc kubenswrapper[4703]: I0130 12:01:55.932690 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 30 12:01:55 crc kubenswrapper[4703]: I0130 12:01:55.942219 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 30 12:01:55 crc kubenswrapper[4703]: I0130 12:01:55.964976 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 30 12:01:55 crc kubenswrapper[4703]: I0130 12:01:55.974851 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.081382 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 
12:01:56.105006 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.105901 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.142847 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.146465 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.146700 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.177853 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.204854 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.651255 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.651364 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.651862 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.655805 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.655818 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.662262 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.662340 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.662767 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.662929 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.663589 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.663770 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.663944 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 30 12:01:56 
crc kubenswrapper[4703]: I0130 12:01:56.669004 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.674666 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.738113 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.738846 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.761578 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.885902 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.914983 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 30 12:01:56 crc kubenswrapper[4703]: I0130 12:01:56.948543 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 30 12:01:57 crc kubenswrapper[4703]: I0130 12:01:57.198788 4703 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 30 12:01:57 crc kubenswrapper[4703]: I0130 12:01:57.199161 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://67da4d5683eccdc9d7976e29642affa7f65024d170d748a76ca72257916a40c0" gracePeriod=5 Jan 30 12:01:57 crc kubenswrapper[4703]: I0130 12:01:57.245959 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 30 12:01:57 crc kubenswrapper[4703]: I0130 12:01:57.358472 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 30 12:01:57 crc kubenswrapper[4703]: I0130 12:01:57.375676 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 30 12:01:57 crc kubenswrapper[4703]: I0130 12:01:57.402662 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 30 12:01:57 crc kubenswrapper[4703]: I0130 12:01:57.429803 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 30 12:01:57 crc kubenswrapper[4703]: I0130 12:01:57.445555 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 30 12:01:57 crc kubenswrapper[4703]: I0130 12:01:57.453883 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 30 12:01:57 crc kubenswrapper[4703]: I0130 12:01:57.505613 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 30 12:01:57 crc kubenswrapper[4703]: I0130 12:01:57.546399 4703 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 30 12:01:57 crc kubenswrapper[4703]: I0130 12:01:57.619575 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 30 12:01:57 crc kubenswrapper[4703]: I0130 12:01:57.670912 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 30 12:01:57 crc kubenswrapper[4703]: I0130 12:01:57.789871 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 30 12:01:57 crc kubenswrapper[4703]: I0130 12:01:57.809041 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 30 12:01:57 crc kubenswrapper[4703]: I0130 12:01:57.810495 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 30 12:01:57 crc kubenswrapper[4703]: I0130 12:01:57.820304 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.402309 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.402938 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.403668 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.404068 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.602644 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.602779 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.603287 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.615555 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.615598 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.624813 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.644939 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.893209 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.895805 4703 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-dns"/"dns-default" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.895862 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.895836 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.896190 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.898588 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.900100 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.998182 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 30 12:01:58 crc kubenswrapper[4703]: I0130 12:01:58.998737 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 30 12:01:59 crc kubenswrapper[4703]: I0130 12:01:59.213618 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 30 12:01:59 crc kubenswrapper[4703]: I0130 12:01:59.213632 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 30 12:01:59 crc kubenswrapper[4703]: I0130 12:01:59.460235 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 30 12:01:59 crc kubenswrapper[4703]: I0130 12:01:59.462570 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 30 12:01:59 crc kubenswrapper[4703]: I0130 12:01:59.462940 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 30 12:01:59 crc kubenswrapper[4703]: I0130 12:01:59.463398 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 30 12:01:59 crc kubenswrapper[4703]: I0130 12:01:59.477772 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 30 12:01:59 crc kubenswrapper[4703]: I0130 12:01:59.789484 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 30 12:01:59 crc kubenswrapper[4703]: I0130 12:01:59.789655 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 30 12:01:59 crc kubenswrapper[4703]: I0130 12:01:59.789673 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 30 12:01:59 crc kubenswrapper[4703]: I0130 12:01:59.789727 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 30 12:01:59 crc kubenswrapper[4703]: I0130 12:01:59.789770 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 30 
12:01:59 crc kubenswrapper[4703]: I0130 12:01:59.878480 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 30 12:01:59 crc kubenswrapper[4703]: I0130 12:01:59.888401 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 30 12:02:00 crc kubenswrapper[4703]: I0130 12:02:00.284863 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 30 12:02:00 crc kubenswrapper[4703]: I0130 12:02:00.333969 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 30 12:02:00 crc kubenswrapper[4703]: I0130 12:02:00.426465 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 30 12:02:00 crc kubenswrapper[4703]: I0130 12:02:00.966741 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 30 12:02:02 crc kubenswrapper[4703]: I0130 12:02:02.910576 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 30 12:02:02 crc kubenswrapper[4703]: I0130 12:02:02.911190 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.084212 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.084343 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.084375 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.084382 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.084507 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.084596 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.084716 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.084757 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.084790 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.085430 4703 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.085466 4703 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.085483 4703 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.085496 4703 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.093605 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.096525 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.096888 4703 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.111068 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.111149 4703 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="4cc67a68-f1ad-44a0-8f03-b728d61aa013" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.115161 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.115211 4703 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="4cc67a68-f1ad-44a0-8f03-b728d61aa013" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.188448 4703 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.293020 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.293098 4703 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="67da4d5683eccdc9d7976e29642affa7f65024d170d748a76ca72257916a40c0" exitCode=137 Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.293185 4703 scope.go:117] "RemoveContainer" containerID="67da4d5683eccdc9d7976e29642affa7f65024d170d748a76ca72257916a40c0" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.293312 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.315385 4703 scope.go:117] "RemoveContainer" containerID="67da4d5683eccdc9d7976e29642affa7f65024d170d748a76ca72257916a40c0" Jan 30 12:02:03 crc kubenswrapper[4703]: E0130 12:02:03.316002 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67da4d5683eccdc9d7976e29642affa7f65024d170d748a76ca72257916a40c0\": container with ID starting with 67da4d5683eccdc9d7976e29642affa7f65024d170d748a76ca72257916a40c0 not found: ID does not exist" containerID="67da4d5683eccdc9d7976e29642affa7f65024d170d748a76ca72257916a40c0" Jan 30 12:02:03 crc kubenswrapper[4703]: I0130 12:02:03.316052 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67da4d5683eccdc9d7976e29642affa7f65024d170d748a76ca72257916a40c0"} err="failed to get container status \"67da4d5683eccdc9d7976e29642affa7f65024d170d748a76ca72257916a40c0\": rpc error: code = NotFound desc = could not find container \"67da4d5683eccdc9d7976e29642affa7f65024d170d748a76ca72257916a40c0\": container with ID starting with 67da4d5683eccdc9d7976e29642affa7f65024d170d748a76ca72257916a40c0 not found: ID does not exist" Jan 30 12:02:42 crc kubenswrapper[4703]: I0130 12:02:42.823977 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:02:42 crc kubenswrapper[4703]: I0130 12:02:42.824863 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.202825 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-dv4qg"] Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.203674 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" podUID="034ae162-4298-4f36-8b7a-eaf5a9fe70ca" containerName="controller-manager" containerID="cri-o://d22de511f561a344d6021239c96163ad3d7a443dab0c831bb3d461b003cb55bf" gracePeriod=30 Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.302210 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8"] Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.302485 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" podUID="3e848933-041a-4f18-89af-8f369b7eebcc" containerName="route-controller-manager" containerID="cri-o://cd3dabf3d2acb56ab24c928b99f754fd0f4512498c6987aee9920a4edf3a1f59" gracePeriod=30 Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.611475 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.634137 4703 generic.go:334] "Generic (PLEG): container finished" podID="3e848933-041a-4f18-89af-8f369b7eebcc" containerID="cd3dabf3d2acb56ab24c928b99f754fd0f4512498c6987aee9920a4edf3a1f59" exitCode=0 Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.634371 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" event={"ID":"3e848933-041a-4f18-89af-8f369b7eebcc","Type":"ContainerDied","Data":"cd3dabf3d2acb56ab24c928b99f754fd0f4512498c6987aee9920a4edf3a1f59"} Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.638944 4703 generic.go:334] "Generic (PLEG): container finished" podID="034ae162-4298-4f36-8b7a-eaf5a9fe70ca" containerID="d22de511f561a344d6021239c96163ad3d7a443dab0c831bb3d461b003cb55bf" exitCode=0 Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.638991 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" event={"ID":"034ae162-4298-4f36-8b7a-eaf5a9fe70ca","Type":"ContainerDied","Data":"d22de511f561a344d6021239c96163ad3d7a443dab0c831bb3d461b003cb55bf"} Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.639018 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" event={"ID":"034ae162-4298-4f36-8b7a-eaf5a9fe70ca","Type":"ContainerDied","Data":"b26d681b258ea563d656eb3373f447a662196316ff0793594ba7a56ce826d82f"} Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.639041 4703 scope.go:117] "RemoveContainer" containerID="d22de511f561a344d6021239c96163ad3d7a443dab0c831bb3d461b003cb55bf" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.639218 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-dv4qg" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.666798 4703 scope.go:117] "RemoveContainer" containerID="d22de511f561a344d6021239c96163ad3d7a443dab0c831bb3d461b003cb55bf" Jan 30 12:02:49 crc kubenswrapper[4703]: E0130 12:02:49.667904 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d22de511f561a344d6021239c96163ad3d7a443dab0c831bb3d461b003cb55bf\": container with ID starting with d22de511f561a344d6021239c96163ad3d7a443dab0c831bb3d461b003cb55bf not found: ID does not exist" containerID="d22de511f561a344d6021239c96163ad3d7a443dab0c831bb3d461b003cb55bf" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.667944 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d22de511f561a344d6021239c96163ad3d7a443dab0c831bb3d461b003cb55bf"} err="failed to get container status \"d22de511f561a344d6021239c96163ad3d7a443dab0c831bb3d461b003cb55bf\": rpc error: code = NotFound desc = could not find container \"d22de511f561a344d6021239c96163ad3d7a443dab0c831bb3d461b003cb55bf\": container with ID starting with d22de511f561a344d6021239c96163ad3d7a443dab0c831bb3d461b003cb55bf not found: ID does not exist" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.720767 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.721830 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-config\") pod \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.721974 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-proxy-ca-bundles\") pod \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.722176 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-client-ca\") pod \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.722219 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-serving-cert\") pod \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.722254 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-25mx7\" (UniqueName: \"kubernetes.io/projected/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-kube-api-access-25mx7\") pod \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\" (UID: \"034ae162-4298-4f36-8b7a-eaf5a9fe70ca\") " Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.723312 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-config" (OuterVolumeSpecName: "config") pod "034ae162-4298-4f36-8b7a-eaf5a9fe70ca" (UID: "034ae162-4298-4f36-8b7a-eaf5a9fe70ca"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.723344 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-client-ca" (OuterVolumeSpecName: "client-ca") pod "034ae162-4298-4f36-8b7a-eaf5a9fe70ca" (UID: "034ae162-4298-4f36-8b7a-eaf5a9fe70ca"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.723888 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "034ae162-4298-4f36-8b7a-eaf5a9fe70ca" (UID: "034ae162-4298-4f36-8b7a-eaf5a9fe70ca"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.731527 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "034ae162-4298-4f36-8b7a-eaf5a9fe70ca" (UID: "034ae162-4298-4f36-8b7a-eaf5a9fe70ca"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.731590 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-kube-api-access-25mx7" (OuterVolumeSpecName: "kube-api-access-25mx7") pod "034ae162-4298-4f36-8b7a-eaf5a9fe70ca" (UID: "034ae162-4298-4f36-8b7a-eaf5a9fe70ca"). InnerVolumeSpecName "kube-api-access-25mx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.824181 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e848933-041a-4f18-89af-8f369b7eebcc-config\") pod \"3e848933-041a-4f18-89af-8f369b7eebcc\" (UID: \"3e848933-041a-4f18-89af-8f369b7eebcc\") " Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.824366 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e848933-041a-4f18-89af-8f369b7eebcc-serving-cert\") pod \"3e848933-041a-4f18-89af-8f369b7eebcc\" (UID: \"3e848933-041a-4f18-89af-8f369b7eebcc\") " Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.824514 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ht7rt\" (UniqueName: \"kubernetes.io/projected/3e848933-041a-4f18-89af-8f369b7eebcc-kube-api-access-ht7rt\") pod \"3e848933-041a-4f18-89af-8f369b7eebcc\" (UID: \"3e848933-041a-4f18-89af-8f369b7eebcc\") " Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.824563 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3e848933-041a-4f18-89af-8f369b7eebcc-client-ca\") pod \"3e848933-041a-4f18-89af-8f369b7eebcc\" (UID: \"3e848933-041a-4f18-89af-8f369b7eebcc\") " Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.824887 4703 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.824911 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.824929 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-25mx7\" (UniqueName: \"kubernetes.io/projected/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-kube-api-access-25mx7\") on node \"crc\" DevicePath \"\"" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.824943 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.824954 4703 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/034ae162-4298-4f36-8b7a-eaf5a9fe70ca-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.825597 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e848933-041a-4f18-89af-8f369b7eebcc-client-ca" (OuterVolumeSpecName: "client-ca") pod "3e848933-041a-4f18-89af-8f369b7eebcc" (UID: 
"3e848933-041a-4f18-89af-8f369b7eebcc"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.825693 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e848933-041a-4f18-89af-8f369b7eebcc-config" (OuterVolumeSpecName: "config") pod "3e848933-041a-4f18-89af-8f369b7eebcc" (UID: "3e848933-041a-4f18-89af-8f369b7eebcc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.829780 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e848933-041a-4f18-89af-8f369b7eebcc-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "3e848933-041a-4f18-89af-8f369b7eebcc" (UID: "3e848933-041a-4f18-89af-8f369b7eebcc"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.830766 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e848933-041a-4f18-89af-8f369b7eebcc-kube-api-access-ht7rt" (OuterVolumeSpecName: "kube-api-access-ht7rt") pod "3e848933-041a-4f18-89af-8f369b7eebcc" (UID: "3e848933-041a-4f18-89af-8f369b7eebcc"). InnerVolumeSpecName "kube-api-access-ht7rt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.926428 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e848933-041a-4f18-89af-8f369b7eebcc-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.926514 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ht7rt\" (UniqueName: \"kubernetes.io/projected/3e848933-041a-4f18-89af-8f369b7eebcc-kube-api-access-ht7rt\") on node \"crc\" DevicePath \"\"" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.926535 4703 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3e848933-041a-4f18-89af-8f369b7eebcc-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.926549 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e848933-041a-4f18-89af-8f369b7eebcc-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:02:49 crc kubenswrapper[4703]: I0130 12:02:49.997284 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-dv4qg"] Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.002194 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-dv4qg"] Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.259445 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6499544d6b-prrgt"] Jan 30 12:02:50 crc kubenswrapper[4703]: E0130 12:02:50.259949 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="034ae162-4298-4f36-8b7a-eaf5a9fe70ca" containerName="controller-manager" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.259994 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="034ae162-4298-4f36-8b7a-eaf5a9fe70ca" containerName="controller-manager" Jan 30 12:02:50 crc kubenswrapper[4703]: E0130 12:02:50.260017 4703 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" containerName="installer" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.260027 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" containerName="installer" Jan 30 12:02:50 crc kubenswrapper[4703]: E0130 12:02:50.260048 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.260057 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 30 12:02:50 crc kubenswrapper[4703]: E0130 12:02:50.260074 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e848933-041a-4f18-89af-8f369b7eebcc" containerName="route-controller-manager" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.260082 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e848933-041a-4f18-89af-8f369b7eebcc" containerName="route-controller-manager" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.260255 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="034ae162-4298-4f36-8b7a-eaf5a9fe70ca" containerName="controller-manager" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.260270 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.260287 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="69ea44cc-6b13-4b1e-9621-448115bc3090" containerName="installer" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.260301 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e848933-041a-4f18-89af-8f369b7eebcc" containerName="route-controller-manager" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.260980 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.263419 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.263604 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.266548 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.266583 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.268074 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.274072 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6499544d6b-prrgt"] Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.281363 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.281801 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.433516 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/962159a3-7b85-4133-9f87-b88f2d22ff8b-config\") pod \"controller-manager-6499544d6b-prrgt\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.433619 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qqcr\" (UniqueName: \"kubernetes.io/projected/962159a3-7b85-4133-9f87-b88f2d22ff8b-kube-api-access-5qqcr\") pod \"controller-manager-6499544d6b-prrgt\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.433698 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/962159a3-7b85-4133-9f87-b88f2d22ff8b-client-ca\") pod \"controller-manager-6499544d6b-prrgt\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.433792 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/962159a3-7b85-4133-9f87-b88f2d22ff8b-proxy-ca-bundles\") pod \"controller-manager-6499544d6b-prrgt\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.433924 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/962159a3-7b85-4133-9f87-b88f2d22ff8b-serving-cert\") pod \"controller-manager-6499544d6b-prrgt\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.534992 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/962159a3-7b85-4133-9f87-b88f2d22ff8b-serving-cert\") pod \"controller-manager-6499544d6b-prrgt\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.535073 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/962159a3-7b85-4133-9f87-b88f2d22ff8b-config\") pod \"controller-manager-6499544d6b-prrgt\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.535105 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qqcr\" (UniqueName: \"kubernetes.io/projected/962159a3-7b85-4133-9f87-b88f2d22ff8b-kube-api-access-5qqcr\") pod \"controller-manager-6499544d6b-prrgt\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.535832 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/962159a3-7b85-4133-9f87-b88f2d22ff8b-client-ca\") pod \"controller-manager-6499544d6b-prrgt\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.535942 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/962159a3-7b85-4133-9f87-b88f2d22ff8b-proxy-ca-bundles\") pod \"controller-manager-6499544d6b-prrgt\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.537614 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/962159a3-7b85-4133-9f87-b88f2d22ff8b-client-ca\") pod \"controller-manager-6499544d6b-prrgt\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.537879 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/962159a3-7b85-4133-9f87-b88f2d22ff8b-config\") pod \"controller-manager-6499544d6b-prrgt\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.537915 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/962159a3-7b85-4133-9f87-b88f2d22ff8b-proxy-ca-bundles\") pod \"controller-manager-6499544d6b-prrgt\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " 
pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.542244 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/962159a3-7b85-4133-9f87-b88f2d22ff8b-serving-cert\") pod \"controller-manager-6499544d6b-prrgt\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.555827 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qqcr\" (UniqueName: \"kubernetes.io/projected/962159a3-7b85-4133-9f87-b88f2d22ff8b-kube-api-access-5qqcr\") pod \"controller-manager-6499544d6b-prrgt\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.582532 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.649885 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.649877 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8" event={"ID":"3e848933-041a-4f18-89af-8f369b7eebcc","Type":"ContainerDied","Data":"c2ffca71ca103ef4fcd3194fd388a4008ae9c4a510b9f35b7fb41480d0019d77"} Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.650052 4703 scope.go:117] "RemoveContainer" containerID="cd3dabf3d2acb56ab24c928b99f754fd0f4512498c6987aee9920a4edf3a1f59" Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.693313 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8"] Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.703417 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-x28r8"] Jan 30 12:02:50 crc kubenswrapper[4703]: I0130 12:02:50.855502 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6499544d6b-prrgt"] Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.095098 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="034ae162-4298-4f36-8b7a-eaf5a9fe70ca" path="/var/lib/kubelet/pods/034ae162-4298-4f36-8b7a-eaf5a9fe70ca/volumes" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.096620 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e848933-041a-4f18-89af-8f369b7eebcc" path="/var/lib/kubelet/pods/3e848933-041a-4f18-89af-8f369b7eebcc/volumes" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.258018 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt"] Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.259241 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.264769 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.265058 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.265158 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.265551 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.265818 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.266161 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.279332 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt"] Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.350828 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/982e86a0-5b19-439a-856b-ad80e89948e1-config\") pod \"route-controller-manager-69f87ffdb6-m4cgt\" (UID: \"982e86a0-5b19-439a-856b-ad80e89948e1\") " pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.350882 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhgp2\" (UniqueName: \"kubernetes.io/projected/982e86a0-5b19-439a-856b-ad80e89948e1-kube-api-access-zhgp2\") pod \"route-controller-manager-69f87ffdb6-m4cgt\" (UID: \"982e86a0-5b19-439a-856b-ad80e89948e1\") " pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.350935 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/982e86a0-5b19-439a-856b-ad80e89948e1-client-ca\") pod \"route-controller-manager-69f87ffdb6-m4cgt\" (UID: \"982e86a0-5b19-439a-856b-ad80e89948e1\") " pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.350961 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/982e86a0-5b19-439a-856b-ad80e89948e1-serving-cert\") pod \"route-controller-manager-69f87ffdb6-m4cgt\" (UID: \"982e86a0-5b19-439a-856b-ad80e89948e1\") " pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.452645 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/982e86a0-5b19-439a-856b-ad80e89948e1-client-ca\") pod 
\"route-controller-manager-69f87ffdb6-m4cgt\" (UID: \"982e86a0-5b19-439a-856b-ad80e89948e1\") " pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.452718 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/982e86a0-5b19-439a-856b-ad80e89948e1-serving-cert\") pod \"route-controller-manager-69f87ffdb6-m4cgt\" (UID: \"982e86a0-5b19-439a-856b-ad80e89948e1\") " pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.452801 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhgp2\" (UniqueName: \"kubernetes.io/projected/982e86a0-5b19-439a-856b-ad80e89948e1-kube-api-access-zhgp2\") pod \"route-controller-manager-69f87ffdb6-m4cgt\" (UID: \"982e86a0-5b19-439a-856b-ad80e89948e1\") " pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.452834 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/982e86a0-5b19-439a-856b-ad80e89948e1-config\") pod \"route-controller-manager-69f87ffdb6-m4cgt\" (UID: \"982e86a0-5b19-439a-856b-ad80e89948e1\") " pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.454269 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/982e86a0-5b19-439a-856b-ad80e89948e1-config\") pod \"route-controller-manager-69f87ffdb6-m4cgt\" (UID: \"982e86a0-5b19-439a-856b-ad80e89948e1\") " pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.454892 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/982e86a0-5b19-439a-856b-ad80e89948e1-client-ca\") pod \"route-controller-manager-69f87ffdb6-m4cgt\" (UID: \"982e86a0-5b19-439a-856b-ad80e89948e1\") " pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.461316 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/982e86a0-5b19-439a-856b-ad80e89948e1-serving-cert\") pod \"route-controller-manager-69f87ffdb6-m4cgt\" (UID: \"982e86a0-5b19-439a-856b-ad80e89948e1\") " pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.477004 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhgp2\" (UniqueName: \"kubernetes.io/projected/982e86a0-5b19-439a-856b-ad80e89948e1-kube-api-access-zhgp2\") pod \"route-controller-manager-69f87ffdb6-m4cgt\" (UID: \"982e86a0-5b19-439a-856b-ad80e89948e1\") " pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.585303 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.666949 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" event={"ID":"962159a3-7b85-4133-9f87-b88f2d22ff8b","Type":"ContainerStarted","Data":"2c93286b3133e5400dda5f3a9c76ee799c98635a7dd2a4b3d709b7760a02713d"} Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.667755 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" event={"ID":"962159a3-7b85-4133-9f87-b88f2d22ff8b","Type":"ContainerStarted","Data":"a98b2d89ccaf28551c23a3bb81f53e6e5822dc72ed4386a2f7c4d06c47a78400"} Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.668357 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.677613 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.708744 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" podStartSLOduration=2.708725454 podStartE2EDuration="2.708725454s" podCreationTimestamp="2026-01-30 12:02:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:02:51.705869861 +0000 UTC m=+407.483691515" watchObservedRunningTime="2026-01-30 12:02:51.708725454 +0000 UTC m=+407.486547108" Jan 30 12:02:51 crc kubenswrapper[4703]: I0130 12:02:51.836088 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt"] Jan 30 12:02:51 crc kubenswrapper[4703]: W0130 12:02:51.844374 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod982e86a0_5b19_439a_856b_ad80e89948e1.slice/crio-e5d5b63434da0068a558f484ed31588345fc1204a6ce4043521a88cf53cd946a WatchSource:0}: Error finding container e5d5b63434da0068a558f484ed31588345fc1204a6ce4043521a88cf53cd946a: Status 404 returned error can't find the container with id e5d5b63434da0068a558f484ed31588345fc1204a6ce4043521a88cf53cd946a Jan 30 12:02:52 crc kubenswrapper[4703]: I0130 12:02:52.684438 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" event={"ID":"982e86a0-5b19-439a-856b-ad80e89948e1","Type":"ContainerStarted","Data":"238e7413fd4b821e3d14d6e874182e4ba58a86aed0fd9879c836d29928f71e99"} Jan 30 12:02:52 crc kubenswrapper[4703]: I0130 12:02:52.685180 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" Jan 30 12:02:52 crc kubenswrapper[4703]: I0130 12:02:52.685203 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" event={"ID":"982e86a0-5b19-439a-856b-ad80e89948e1","Type":"ContainerStarted","Data":"e5d5b63434da0068a558f484ed31588345fc1204a6ce4043521a88cf53cd946a"} Jan 30 12:02:52 crc kubenswrapper[4703]: I0130 12:02:52.693228 4703 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" Jan 30 12:02:52 crc kubenswrapper[4703]: I0130 12:02:52.710679 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" podStartSLOduration=3.7106623020000002 podStartE2EDuration="3.710662302s" podCreationTimestamp="2026-01-30 12:02:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:02:52.707601504 +0000 UTC m=+408.485423158" watchObservedRunningTime="2026-01-30 12:02:52.710662302 +0000 UTC m=+408.488483956" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.046955 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-dk9mj"] Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.047768 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.067320 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-dk9mj"] Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.179943 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ab63a218-f804-43ee-8dd9-06726ec9dfbf-bound-sa-token\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.180055 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlg4g\" (UniqueName: \"kubernetes.io/projected/ab63a218-f804-43ee-8dd9-06726ec9dfbf-kube-api-access-hlg4g\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.180099 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ab63a218-f804-43ee-8dd9-06726ec9dfbf-registry-certificates\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.180178 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ab63a218-f804-43ee-8dd9-06726ec9dfbf-trusted-ca\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.180228 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ab63a218-f804-43ee-8dd9-06726ec9dfbf-ca-trust-extracted\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.180257 4703 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ab63a218-f804-43ee-8dd9-06726ec9dfbf-registry-tls\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.180299 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ab63a218-f804-43ee-8dd9-06726ec9dfbf-installation-pull-secrets\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.180345 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.211890 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.282709 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ab63a218-f804-43ee-8dd9-06726ec9dfbf-bound-sa-token\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.283271 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlg4g\" (UniqueName: \"kubernetes.io/projected/ab63a218-f804-43ee-8dd9-06726ec9dfbf-kube-api-access-hlg4g\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.283453 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ab63a218-f804-43ee-8dd9-06726ec9dfbf-registry-certificates\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.283608 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ab63a218-f804-43ee-8dd9-06726ec9dfbf-trusted-ca\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.283782 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: 
\"kubernetes.io/empty-dir/ab63a218-f804-43ee-8dd9-06726ec9dfbf-ca-trust-extracted\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.285286 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ab63a218-f804-43ee-8dd9-06726ec9dfbf-registry-tls\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.285503 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ab63a218-f804-43ee-8dd9-06726ec9dfbf-installation-pull-secrets\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.286505 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ab63a218-f804-43ee-8dd9-06726ec9dfbf-ca-trust-extracted\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.287346 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ab63a218-f804-43ee-8dd9-06726ec9dfbf-trusted-ca\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.287474 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ab63a218-f804-43ee-8dd9-06726ec9dfbf-registry-certificates\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.292843 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ab63a218-f804-43ee-8dd9-06726ec9dfbf-registry-tls\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.292843 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ab63a218-f804-43ee-8dd9-06726ec9dfbf-installation-pull-secrets\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.307024 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlg4g\" (UniqueName: \"kubernetes.io/projected/ab63a218-f804-43ee-8dd9-06726ec9dfbf-kube-api-access-hlg4g\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc 
kubenswrapper[4703]: I0130 12:02:53.310345 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ab63a218-f804-43ee-8dd9-06726ec9dfbf-bound-sa-token\") pod \"image-registry-66df7c8f76-dk9mj\" (UID: \"ab63a218-f804-43ee-8dd9-06726ec9dfbf\") " pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.366571 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.632145 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-dk9mj"] Jan 30 12:02:53 crc kubenswrapper[4703]: W0130 12:02:53.635738 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podab63a218_f804_43ee_8dd9_06726ec9dfbf.slice/crio-ed2f3f3f28d4f771dae87a8f010cb54bad981df0ed4421e5451c150ab1050297 WatchSource:0}: Error finding container ed2f3f3f28d4f771dae87a8f010cb54bad981df0ed4421e5451c150ab1050297: Status 404 returned error can't find the container with id ed2f3f3f28d4f771dae87a8f010cb54bad981df0ed4421e5451c150ab1050297 Jan 30 12:02:53 crc kubenswrapper[4703]: I0130 12:02:53.691999 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" event={"ID":"ab63a218-f804-43ee-8dd9-06726ec9dfbf","Type":"ContainerStarted","Data":"ed2f3f3f28d4f771dae87a8f010cb54bad981df0ed4421e5451c150ab1050297"} Jan 30 12:02:54 crc kubenswrapper[4703]: I0130 12:02:54.702209 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" event={"ID":"ab63a218-f804-43ee-8dd9-06726ec9dfbf","Type":"ContainerStarted","Data":"f6ecf52e39ba9ecc8863af24e6462eb4ecf4c5d91389531cff8cd07369e38fe4"} Jan 30 12:02:54 crc kubenswrapper[4703]: I0130 12:02:54.733826 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" podStartSLOduration=1.733788214 podStartE2EDuration="1.733788214s" podCreationTimestamp="2026-01-30 12:02:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:02:54.731023154 +0000 UTC m=+410.508844808" watchObservedRunningTime="2026-01-30 12:02:54.733788214 +0000 UTC m=+410.511609868" Jan 30 12:02:55 crc kubenswrapper[4703]: I0130 12:02:55.709147 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.103290 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6499544d6b-prrgt"] Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.103941 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" podUID="962159a3-7b85-4133-9f87-b88f2d22ff8b" containerName="controller-manager" containerID="cri-o://2c93286b3133e5400dda5f3a9c76ee799c98635a7dd2a4b3d709b7760a02713d" gracePeriod=30 Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.123369 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt"] Jan 30 12:03:02 
crc kubenswrapper[4703]: I0130 12:03:02.123601 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" podUID="982e86a0-5b19-439a-856b-ad80e89948e1" containerName="route-controller-manager" containerID="cri-o://238e7413fd4b821e3d14d6e874182e4ba58a86aed0fd9879c836d29928f71e99" gracePeriod=30 Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.756015 4703 generic.go:334] "Generic (PLEG): container finished" podID="962159a3-7b85-4133-9f87-b88f2d22ff8b" containerID="2c93286b3133e5400dda5f3a9c76ee799c98635a7dd2a4b3d709b7760a02713d" exitCode=0 Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.756638 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" event={"ID":"962159a3-7b85-4133-9f87-b88f2d22ff8b","Type":"ContainerDied","Data":"2c93286b3133e5400dda5f3a9c76ee799c98635a7dd2a4b3d709b7760a02713d"} Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.759215 4703 generic.go:334] "Generic (PLEG): container finished" podID="982e86a0-5b19-439a-856b-ad80e89948e1" containerID="238e7413fd4b821e3d14d6e874182e4ba58a86aed0fd9879c836d29928f71e99" exitCode=0 Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.759297 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" event={"ID":"982e86a0-5b19-439a-856b-ad80e89948e1","Type":"ContainerDied","Data":"238e7413fd4b821e3d14d6e874182e4ba58a86aed0fd9879c836d29928f71e99"} Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.759358 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" event={"ID":"982e86a0-5b19-439a-856b-ad80e89948e1","Type":"ContainerDied","Data":"e5d5b63434da0068a558f484ed31588345fc1204a6ce4043521a88cf53cd946a"} Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.759377 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5d5b63434da0068a558f484ed31588345fc1204a6ce4043521a88cf53cd946a" Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.760863 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.819364 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.830854 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/982e86a0-5b19-439a-856b-ad80e89948e1-serving-cert\") pod \"982e86a0-5b19-439a-856b-ad80e89948e1\" (UID: \"982e86a0-5b19-439a-856b-ad80e89948e1\") " Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.830950 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/982e86a0-5b19-439a-856b-ad80e89948e1-client-ca\") pod \"982e86a0-5b19-439a-856b-ad80e89948e1\" (UID: \"982e86a0-5b19-439a-856b-ad80e89948e1\") " Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.831029 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/982e86a0-5b19-439a-856b-ad80e89948e1-config\") pod \"982e86a0-5b19-439a-856b-ad80e89948e1\" (UID: \"982e86a0-5b19-439a-856b-ad80e89948e1\") " Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.831153 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zhgp2\" (UniqueName: \"kubernetes.io/projected/982e86a0-5b19-439a-856b-ad80e89948e1-kube-api-access-zhgp2\") pod \"982e86a0-5b19-439a-856b-ad80e89948e1\" (UID: \"982e86a0-5b19-439a-856b-ad80e89948e1\") " Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.833880 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/982e86a0-5b19-439a-856b-ad80e89948e1-config" (OuterVolumeSpecName: "config") pod "982e86a0-5b19-439a-856b-ad80e89948e1" (UID: "982e86a0-5b19-439a-856b-ad80e89948e1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.833879 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/982e86a0-5b19-439a-856b-ad80e89948e1-client-ca" (OuterVolumeSpecName: "client-ca") pod "982e86a0-5b19-439a-856b-ad80e89948e1" (UID: "982e86a0-5b19-439a-856b-ad80e89948e1"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.841740 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/982e86a0-5b19-439a-856b-ad80e89948e1-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "982e86a0-5b19-439a-856b-ad80e89948e1" (UID: "982e86a0-5b19-439a-856b-ad80e89948e1"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.842519 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/982e86a0-5b19-439a-856b-ad80e89948e1-kube-api-access-zhgp2" (OuterVolumeSpecName: "kube-api-access-zhgp2") pod "982e86a0-5b19-439a-856b-ad80e89948e1" (UID: "982e86a0-5b19-439a-856b-ad80e89948e1"). InnerVolumeSpecName "kube-api-access-zhgp2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.932826 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5qqcr\" (UniqueName: \"kubernetes.io/projected/962159a3-7b85-4133-9f87-b88f2d22ff8b-kube-api-access-5qqcr\") pod \"962159a3-7b85-4133-9f87-b88f2d22ff8b\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.933034 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/962159a3-7b85-4133-9f87-b88f2d22ff8b-config\") pod \"962159a3-7b85-4133-9f87-b88f2d22ff8b\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.933134 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/962159a3-7b85-4133-9f87-b88f2d22ff8b-serving-cert\") pod \"962159a3-7b85-4133-9f87-b88f2d22ff8b\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.933190 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/962159a3-7b85-4133-9f87-b88f2d22ff8b-client-ca\") pod \"962159a3-7b85-4133-9f87-b88f2d22ff8b\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.933232 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/962159a3-7b85-4133-9f87-b88f2d22ff8b-proxy-ca-bundles\") pod \"962159a3-7b85-4133-9f87-b88f2d22ff8b\" (UID: \"962159a3-7b85-4133-9f87-b88f2d22ff8b\") " Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.933566 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/982e86a0-5b19-439a-856b-ad80e89948e1-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.933588 4703 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/982e86a0-5b19-439a-856b-ad80e89948e1-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.933598 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/982e86a0-5b19-439a-856b-ad80e89948e1-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.933607 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zhgp2\" (UniqueName: \"kubernetes.io/projected/982e86a0-5b19-439a-856b-ad80e89948e1-kube-api-access-zhgp2\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.934829 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/962159a3-7b85-4133-9f87-b88f2d22ff8b-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "962159a3-7b85-4133-9f87-b88f2d22ff8b" (UID: "962159a3-7b85-4133-9f87-b88f2d22ff8b"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.934816 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/962159a3-7b85-4133-9f87-b88f2d22ff8b-client-ca" (OuterVolumeSpecName: "client-ca") pod "962159a3-7b85-4133-9f87-b88f2d22ff8b" (UID: "962159a3-7b85-4133-9f87-b88f2d22ff8b"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.935018 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/962159a3-7b85-4133-9f87-b88f2d22ff8b-config" (OuterVolumeSpecName: "config") pod "962159a3-7b85-4133-9f87-b88f2d22ff8b" (UID: "962159a3-7b85-4133-9f87-b88f2d22ff8b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.940567 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/962159a3-7b85-4133-9f87-b88f2d22ff8b-kube-api-access-5qqcr" (OuterVolumeSpecName: "kube-api-access-5qqcr") pod "962159a3-7b85-4133-9f87-b88f2d22ff8b" (UID: "962159a3-7b85-4133-9f87-b88f2d22ff8b"). InnerVolumeSpecName "kube-api-access-5qqcr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:03:02 crc kubenswrapper[4703]: I0130 12:03:02.940949 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/962159a3-7b85-4133-9f87-b88f2d22ff8b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "962159a3-7b85-4133-9f87-b88f2d22ff8b" (UID: "962159a3-7b85-4133-9f87-b88f2d22ff8b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.035985 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5qqcr\" (UniqueName: \"kubernetes.io/projected/962159a3-7b85-4133-9f87-b88f2d22ff8b-kube-api-access-5qqcr\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.036047 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/962159a3-7b85-4133-9f87-b88f2d22ff8b-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.036063 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/962159a3-7b85-4133-9f87-b88f2d22ff8b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.036077 4703 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/962159a3-7b85-4133-9f87-b88f2d22ff8b-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.036090 4703 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/962159a3-7b85-4133-9f87-b88f2d22ff8b-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.269010 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f"] Jan 30 12:03:03 crc kubenswrapper[4703]: E0130 12:03:03.269384 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="962159a3-7b85-4133-9f87-b88f2d22ff8b" containerName="controller-manager" Jan 30 12:03:03 crc kubenswrapper[4703]: 
Jan 30 12:03:03 crc kubenswrapper[4703]: E0130 12:03:03.269439 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="982e86a0-5b19-439a-856b-ad80e89948e1" containerName="route-controller-manager"
Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.269445 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="982e86a0-5b19-439a-856b-ad80e89948e1" containerName="route-controller-manager"
Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.269566 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="962159a3-7b85-4133-9f87-b88f2d22ff8b" containerName="controller-manager"
Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.269579 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="982e86a0-5b19-439a-856b-ad80e89948e1" containerName="route-controller-manager"
Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.270051 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f"
Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.285196 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f"]
Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.341463 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29877b14-3f45-4f01-b108-0ff48657529b-config\") pod \"controller-manager-85dcb44cbd-v5q6f\" (UID: \"29877b14-3f45-4f01-b108-0ff48657529b\") " pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f"
Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.341555 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjt5t\" (UniqueName: \"kubernetes.io/projected/29877b14-3f45-4f01-b108-0ff48657529b-kube-api-access-wjt5t\") pod \"controller-manager-85dcb44cbd-v5q6f\" (UID: \"29877b14-3f45-4f01-b108-0ff48657529b\") " pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f"
Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.341886 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29877b14-3f45-4f01-b108-0ff48657529b-serving-cert\") pod \"controller-manager-85dcb44cbd-v5q6f\" (UID: \"29877b14-3f45-4f01-b108-0ff48657529b\") " pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f"
Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.341976 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29877b14-3f45-4f01-b108-0ff48657529b-client-ca\") pod \"controller-manager-85dcb44cbd-v5q6f\" (UID: \"29877b14-3f45-4f01-b108-0ff48657529b\") " pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f"
Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.342091 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/29877b14-3f45-4f01-b108-0ff48657529b-proxy-ca-bundles\") pod \"controller-manager-85dcb44cbd-v5q6f\" (UID: \"29877b14-3f45-4f01-b108-0ff48657529b\") " pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f"
pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.443559 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29877b14-3f45-4f01-b108-0ff48657529b-config\") pod \"controller-manager-85dcb44cbd-v5q6f\" (UID: \"29877b14-3f45-4f01-b108-0ff48657529b\") " pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.443623 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjt5t\" (UniqueName: \"kubernetes.io/projected/29877b14-3f45-4f01-b108-0ff48657529b-kube-api-access-wjt5t\") pod \"controller-manager-85dcb44cbd-v5q6f\" (UID: \"29877b14-3f45-4f01-b108-0ff48657529b\") " pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.443678 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/29877b14-3f45-4f01-b108-0ff48657529b-serving-cert\") pod \"controller-manager-85dcb44cbd-v5q6f\" (UID: \"29877b14-3f45-4f01-b108-0ff48657529b\") " pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.443699 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29877b14-3f45-4f01-b108-0ff48657529b-client-ca\") pod \"controller-manager-85dcb44cbd-v5q6f\" (UID: \"29877b14-3f45-4f01-b108-0ff48657529b\") " pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.443722 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/29877b14-3f45-4f01-b108-0ff48657529b-proxy-ca-bundles\") pod \"controller-manager-85dcb44cbd-v5q6f\" (UID: \"29877b14-3f45-4f01-b108-0ff48657529b\") " pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.444936 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/29877b14-3f45-4f01-b108-0ff48657529b-proxy-ca-bundles\") pod \"controller-manager-85dcb44cbd-v5q6f\" (UID: \"29877b14-3f45-4f01-b108-0ff48657529b\") " pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.445856 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29877b14-3f45-4f01-b108-0ff48657529b-config\") pod \"controller-manager-85dcb44cbd-v5q6f\" (UID: \"29877b14-3f45-4f01-b108-0ff48657529b\") " pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.446305 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/29877b14-3f45-4f01-b108-0ff48657529b-client-ca\") pod \"controller-manager-85dcb44cbd-v5q6f\" (UID: \"29877b14-3f45-4f01-b108-0ff48657529b\") " pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.455534 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/29877b14-3f45-4f01-b108-0ff48657529b-serving-cert\") pod \"controller-manager-85dcb44cbd-v5q6f\" (UID: \"29877b14-3f45-4f01-b108-0ff48657529b\") " pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.470091 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjt5t\" (UniqueName: \"kubernetes.io/projected/29877b14-3f45-4f01-b108-0ff48657529b-kube-api-access-wjt5t\") pod \"controller-manager-85dcb44cbd-v5q6f\" (UID: \"29877b14-3f45-4f01-b108-0ff48657529b\") " pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.588332 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.770576 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.770578 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" event={"ID":"962159a3-7b85-4133-9f87-b88f2d22ff8b","Type":"ContainerDied","Data":"a98b2d89ccaf28551c23a3bb81f53e6e5822dc72ed4386a2f7c4d06c47a78400"} Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.771454 4703 scope.go:117] "RemoveContainer" containerID="2c93286b3133e5400dda5f3a9c76ee799c98635a7dd2a4b3d709b7760a02713d" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.770628 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6499544d6b-prrgt" Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.802441 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt"] Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.806790 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-69f87ffdb6-m4cgt"] Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.838827 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6499544d6b-prrgt"] Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.847825 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6499544d6b-prrgt"] Jan 30 12:03:03 crc kubenswrapper[4703]: I0130 12:03:03.859696 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f"] Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.270976 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6"] Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.273184 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.279559 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.279798 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.279829 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.280367 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.280647 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.288588 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.296550 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6"] Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.361429 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snq7f\" (UniqueName: \"kubernetes.io/projected/65a008e3-b446-48cb-8be5-d2ea94b456e2-kube-api-access-snq7f\") pod \"route-controller-manager-7fdf5f4875-7wtr6\" (UID: \"65a008e3-b446-48cb-8be5-d2ea94b456e2\") " pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.361517 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/65a008e3-b446-48cb-8be5-d2ea94b456e2-client-ca\") pod \"route-controller-manager-7fdf5f4875-7wtr6\" (UID: \"65a008e3-b446-48cb-8be5-d2ea94b456e2\") " pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.361581 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/65a008e3-b446-48cb-8be5-d2ea94b456e2-config\") pod \"route-controller-manager-7fdf5f4875-7wtr6\" (UID: \"65a008e3-b446-48cb-8be5-d2ea94b456e2\") " pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.361621 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/65a008e3-b446-48cb-8be5-d2ea94b456e2-serving-cert\") pod \"route-controller-manager-7fdf5f4875-7wtr6\" (UID: \"65a008e3-b446-48cb-8be5-d2ea94b456e2\") " pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.463354 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/65a008e3-b446-48cb-8be5-d2ea94b456e2-config\") pod 
\"route-controller-manager-7fdf5f4875-7wtr6\" (UID: \"65a008e3-b446-48cb-8be5-d2ea94b456e2\") " pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.463438 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/65a008e3-b446-48cb-8be5-d2ea94b456e2-serving-cert\") pod \"route-controller-manager-7fdf5f4875-7wtr6\" (UID: \"65a008e3-b446-48cb-8be5-d2ea94b456e2\") " pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.463479 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snq7f\" (UniqueName: \"kubernetes.io/projected/65a008e3-b446-48cb-8be5-d2ea94b456e2-kube-api-access-snq7f\") pod \"route-controller-manager-7fdf5f4875-7wtr6\" (UID: \"65a008e3-b446-48cb-8be5-d2ea94b456e2\") " pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.463528 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/65a008e3-b446-48cb-8be5-d2ea94b456e2-client-ca\") pod \"route-controller-manager-7fdf5f4875-7wtr6\" (UID: \"65a008e3-b446-48cb-8be5-d2ea94b456e2\") " pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.465076 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/65a008e3-b446-48cb-8be5-d2ea94b456e2-client-ca\") pod \"route-controller-manager-7fdf5f4875-7wtr6\" (UID: \"65a008e3-b446-48cb-8be5-d2ea94b456e2\") " pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.465077 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/65a008e3-b446-48cb-8be5-d2ea94b456e2-config\") pod \"route-controller-manager-7fdf5f4875-7wtr6\" (UID: \"65a008e3-b446-48cb-8be5-d2ea94b456e2\") " pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.473265 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/65a008e3-b446-48cb-8be5-d2ea94b456e2-serving-cert\") pod \"route-controller-manager-7fdf5f4875-7wtr6\" (UID: \"65a008e3-b446-48cb-8be5-d2ea94b456e2\") " pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.499025 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snq7f\" (UniqueName: \"kubernetes.io/projected/65a008e3-b446-48cb-8be5-d2ea94b456e2-kube-api-access-snq7f\") pod \"route-controller-manager-7fdf5f4875-7wtr6\" (UID: \"65a008e3-b446-48cb-8be5-d2ea94b456e2\") " pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.638888 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.841638 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f" event={"ID":"29877b14-3f45-4f01-b108-0ff48657529b","Type":"ContainerStarted","Data":"4679d3c0b524df34908cbc9edde5709073ee7972409e14f7b6f2a8ac503052e8"} Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.842161 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f" event={"ID":"29877b14-3f45-4f01-b108-0ff48657529b","Type":"ContainerStarted","Data":"27b533f68c32ef497af3ef45e3162363897ce73bc6eaf09ecdcc4544c0a67940"} Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.843781 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.866815 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f" Jan 30 12:03:04 crc kubenswrapper[4703]: I0130 12:03:04.879308 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-85dcb44cbd-v5q6f" podStartSLOduration=2.879273298 podStartE2EDuration="2.879273298s" podCreationTimestamp="2026-01-30 12:03:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:03:04.876259351 +0000 UTC m=+420.654081005" watchObservedRunningTime="2026-01-30 12:03:04.879273298 +0000 UTC m=+420.657094952" Jan 30 12:03:05 crc kubenswrapper[4703]: I0130 12:03:05.017049 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6"] Jan 30 12:03:05 crc kubenswrapper[4703]: W0130 12:03:05.026745 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod65a008e3_b446_48cb_8be5_d2ea94b456e2.slice/crio-b18589a4405172b2627d61ae00c3114a2fb9b07c5f8485a37c0b40324de2d752 WatchSource:0}: Error finding container b18589a4405172b2627d61ae00c3114a2fb9b07c5f8485a37c0b40324de2d752: Status 404 returned error can't find the container with id b18589a4405172b2627d61ae00c3114a2fb9b07c5f8485a37c0b40324de2d752 Jan 30 12:03:05 crc kubenswrapper[4703]: I0130 12:03:05.097058 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="962159a3-7b85-4133-9f87-b88f2d22ff8b" path="/var/lib/kubelet/pods/962159a3-7b85-4133-9f87-b88f2d22ff8b/volumes" Jan 30 12:03:05 crc kubenswrapper[4703]: I0130 12:03:05.097942 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="982e86a0-5b19-439a-856b-ad80e89948e1" path="/var/lib/kubelet/pods/982e86a0-5b19-439a-856b-ad80e89948e1/volumes" Jan 30 12:03:05 crc kubenswrapper[4703]: I0130 12:03:05.854138 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" event={"ID":"65a008e3-b446-48cb-8be5-d2ea94b456e2","Type":"ContainerStarted","Data":"a58db7a8b53900cdabb2441a13de541629274b3baab9c92543ae23bc7030f14a"} Jan 30 12:03:05 crc kubenswrapper[4703]: I0130 12:03:05.854774 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" event={"ID":"65a008e3-b446-48cb-8be5-d2ea94b456e2","Type":"ContainerStarted","Data":"b18589a4405172b2627d61ae00c3114a2fb9b07c5f8485a37c0b40324de2d752"} Jan 30 12:03:05 crc kubenswrapper[4703]: I0130 12:03:05.889280 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" podStartSLOduration=3.889246488 podStartE2EDuration="3.889246488s" podCreationTimestamp="2026-01-30 12:03:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:03:05.883246275 +0000 UTC m=+421.661067929" watchObservedRunningTime="2026-01-30 12:03:05.889246488 +0000 UTC m=+421.667068142" Jan 30 12:03:06 crc kubenswrapper[4703]: I0130 12:03:06.760262 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8gf7w"] Jan 30 12:03:06 crc kubenswrapper[4703]: I0130 12:03:06.761341 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-8gf7w" podUID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" containerName="registry-server" containerID="cri-o://2c3df39413546964e4c718057c5c9dec2ae14b2ff21a563454e4a895c84b197f" gracePeriod=2 Jan 30 12:03:06 crc kubenswrapper[4703]: I0130 12:03:06.955672 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" Jan 30 12:03:06 crc kubenswrapper[4703]: I0130 12:03:06.962561 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" Jan 30 12:03:06 crc kubenswrapper[4703]: I0130 12:03:06.966069 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-64ptg"] Jan 30 12:03:06 crc kubenswrapper[4703]: I0130 12:03:06.966489 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-64ptg" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" containerName="registry-server" containerID="cri-o://b0ccce95a18e7ebdacfe38059008165099e4f3c7a59933364b5fb446bf74072d" gracePeriod=2 Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.459023 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-64ptg" Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.559819 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wh47z\" (UniqueName: \"kubernetes.io/projected/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa-kube-api-access-wh47z\") pod \"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa\" (UID: \"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa\") " Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.559922 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa-catalog-content\") pod \"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa\" (UID: \"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa\") " Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.559984 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa-utilities\") pod \"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa\" (UID: \"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa\") " Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.561725 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa-utilities" (OuterVolumeSpecName: "utilities") pod "7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" (UID: "7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.572093 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa-kube-api-access-wh47z" (OuterVolumeSpecName: "kube-api-access-wh47z") pod "7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" (UID: "7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa"). InnerVolumeSpecName "kube-api-access-wh47z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.623041 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" (UID: "7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.662816 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wh47z\" (UniqueName: \"kubernetes.io/projected/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa-kube-api-access-wh47z\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.662878 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.662892 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.854349 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-8gf7w" Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.966336 4703 generic.go:334] "Generic (PLEG): container finished" podID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" containerID="2c3df39413546964e4c718057c5c9dec2ae14b2ff21a563454e4a895c84b197f" exitCode=0 Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.966433 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8gf7w" event={"ID":"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63","Type":"ContainerDied","Data":"2c3df39413546964e4c718057c5c9dec2ae14b2ff21a563454e4a895c84b197f"} Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.966503 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8gf7w" Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.966529 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8gf7w" event={"ID":"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63","Type":"ContainerDied","Data":"736f7b46577942901a1dd69fdfca4905edae1e0037b59192de727ae55b98b7f0"} Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.966558 4703 scope.go:117] "RemoveContainer" containerID="2c3df39413546964e4c718057c5c9dec2ae14b2ff21a563454e4a895c84b197f" Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.968403 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63-catalog-content\") pod \"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63\" (UID: \"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63\") " Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.968494 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-546ch\" (UniqueName: \"kubernetes.io/projected/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63-kube-api-access-546ch\") pod \"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63\" (UID: \"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63\") " Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.968610 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63-utilities\") pod \"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63\" (UID: \"2bfef4b4-9e99-4b51-bec9-2e6619cdbc63\") " Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.972629 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63-utilities" (OuterVolumeSpecName: "utilities") pod "2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" (UID: "2bfef4b4-9e99-4b51-bec9-2e6619cdbc63"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.975230 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-64ptg" Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.974333 4703 generic.go:334] "Generic (PLEG): container finished" podID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" containerID="b0ccce95a18e7ebdacfe38059008165099e4f3c7a59933364b5fb446bf74072d" exitCode=0 Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.976857 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64ptg" event={"ID":"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa","Type":"ContainerDied","Data":"b0ccce95a18e7ebdacfe38059008165099e4f3c7a59933364b5fb446bf74072d"} Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.976912 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-64ptg" event={"ID":"7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa","Type":"ContainerDied","Data":"4e7c0dcab3fa46b2360edcd0c722ae4ccdfcfbd94b57af3ae49051809da2e596"} Jan 30 12:03:07 crc kubenswrapper[4703]: I0130 12:03:07.983260 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63-kube-api-access-546ch" (OuterVolumeSpecName: "kube-api-access-546ch") pod "2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" (UID: "2bfef4b4-9e99-4b51-bec9-2e6619cdbc63"). InnerVolumeSpecName "kube-api-access-546ch". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.045891 4703 scope.go:117] "RemoveContainer" containerID="4b2f7b5b5fadfb4b826eac54567977d6c51b0a4f2aa7b51017a43548b7c989c3" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.057741 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-64ptg"] Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.075620 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-64ptg"] Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.076700 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-546ch\" (UniqueName: \"kubernetes.io/projected/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63-kube-api-access-546ch\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.076783 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.086337 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" (UID: "2bfef4b4-9e99-4b51-bec9-2e6619cdbc63"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.126476 4703 scope.go:117] "RemoveContainer" containerID="7066c37d6bbb70531ff839e9219241b538003a23fe4dad465901a92ab32b09b0" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.163171 4703 scope.go:117] "RemoveContainer" containerID="2c3df39413546964e4c718057c5c9dec2ae14b2ff21a563454e4a895c84b197f" Jan 30 12:03:08 crc kubenswrapper[4703]: E0130 12:03:08.164011 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c3df39413546964e4c718057c5c9dec2ae14b2ff21a563454e4a895c84b197f\": container with ID starting with 2c3df39413546964e4c718057c5c9dec2ae14b2ff21a563454e4a895c84b197f not found: ID does not exist" containerID="2c3df39413546964e4c718057c5c9dec2ae14b2ff21a563454e4a895c84b197f" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.164064 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c3df39413546964e4c718057c5c9dec2ae14b2ff21a563454e4a895c84b197f"} err="failed to get container status \"2c3df39413546964e4c718057c5c9dec2ae14b2ff21a563454e4a895c84b197f\": rpc error: code = NotFound desc = could not find container \"2c3df39413546964e4c718057c5c9dec2ae14b2ff21a563454e4a895c84b197f\": container with ID starting with 2c3df39413546964e4c718057c5c9dec2ae14b2ff21a563454e4a895c84b197f not found: ID does not exist" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.164107 4703 scope.go:117] "RemoveContainer" containerID="4b2f7b5b5fadfb4b826eac54567977d6c51b0a4f2aa7b51017a43548b7c989c3" Jan 30 12:03:08 crc kubenswrapper[4703]: E0130 12:03:08.164678 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b2f7b5b5fadfb4b826eac54567977d6c51b0a4f2aa7b51017a43548b7c989c3\": container with ID starting with 4b2f7b5b5fadfb4b826eac54567977d6c51b0a4f2aa7b51017a43548b7c989c3 not found: ID does not exist" containerID="4b2f7b5b5fadfb4b826eac54567977d6c51b0a4f2aa7b51017a43548b7c989c3" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.164720 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b2f7b5b5fadfb4b826eac54567977d6c51b0a4f2aa7b51017a43548b7c989c3"} err="failed to get container status \"4b2f7b5b5fadfb4b826eac54567977d6c51b0a4f2aa7b51017a43548b7c989c3\": rpc error: code = NotFound desc = could not find container \"4b2f7b5b5fadfb4b826eac54567977d6c51b0a4f2aa7b51017a43548b7c989c3\": container with ID starting with 4b2f7b5b5fadfb4b826eac54567977d6c51b0a4f2aa7b51017a43548b7c989c3 not found: ID does not exist" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.164741 4703 scope.go:117] "RemoveContainer" containerID="7066c37d6bbb70531ff839e9219241b538003a23fe4dad465901a92ab32b09b0" Jan 30 12:03:08 crc kubenswrapper[4703]: E0130 12:03:08.164960 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7066c37d6bbb70531ff839e9219241b538003a23fe4dad465901a92ab32b09b0\": container with ID starting with 7066c37d6bbb70531ff839e9219241b538003a23fe4dad465901a92ab32b09b0 not found: ID does not exist" containerID="7066c37d6bbb70531ff839e9219241b538003a23fe4dad465901a92ab32b09b0" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.164984 4703 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"7066c37d6bbb70531ff839e9219241b538003a23fe4dad465901a92ab32b09b0"} err="failed to get container status \"7066c37d6bbb70531ff839e9219241b538003a23fe4dad465901a92ab32b09b0\": rpc error: code = NotFound desc = could not find container \"7066c37d6bbb70531ff839e9219241b538003a23fe4dad465901a92ab32b09b0\": container with ID starting with 7066c37d6bbb70531ff839e9219241b538003a23fe4dad465901a92ab32b09b0 not found: ID does not exist" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.165000 4703 scope.go:117] "RemoveContainer" containerID="b0ccce95a18e7ebdacfe38059008165099e4f3c7a59933364b5fb446bf74072d" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.178953 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.185810 4703 scope.go:117] "RemoveContainer" containerID="a9cb12f005c18cefa1f8d06258958a47cd9dbae77b07b9321f3a63b0c2201f97" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.225752 4703 scope.go:117] "RemoveContainer" containerID="e45e9f34f0a242323f995d20efb819515d38699870860e3d0cd21a497215d424" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.250638 4703 scope.go:117] "RemoveContainer" containerID="b0ccce95a18e7ebdacfe38059008165099e4f3c7a59933364b5fb446bf74072d" Jan 30 12:03:08 crc kubenswrapper[4703]: E0130 12:03:08.251267 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b0ccce95a18e7ebdacfe38059008165099e4f3c7a59933364b5fb446bf74072d\": container with ID starting with b0ccce95a18e7ebdacfe38059008165099e4f3c7a59933364b5fb446bf74072d not found: ID does not exist" containerID="b0ccce95a18e7ebdacfe38059008165099e4f3c7a59933364b5fb446bf74072d" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.251304 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b0ccce95a18e7ebdacfe38059008165099e4f3c7a59933364b5fb446bf74072d"} err="failed to get container status \"b0ccce95a18e7ebdacfe38059008165099e4f3c7a59933364b5fb446bf74072d\": rpc error: code = NotFound desc = could not find container \"b0ccce95a18e7ebdacfe38059008165099e4f3c7a59933364b5fb446bf74072d\": container with ID starting with b0ccce95a18e7ebdacfe38059008165099e4f3c7a59933364b5fb446bf74072d not found: ID does not exist" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.251337 4703 scope.go:117] "RemoveContainer" containerID="a9cb12f005c18cefa1f8d06258958a47cd9dbae77b07b9321f3a63b0c2201f97" Jan 30 12:03:08 crc kubenswrapper[4703]: E0130 12:03:08.251862 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9cb12f005c18cefa1f8d06258958a47cd9dbae77b07b9321f3a63b0c2201f97\": container with ID starting with a9cb12f005c18cefa1f8d06258958a47cd9dbae77b07b9321f3a63b0c2201f97 not found: ID does not exist" containerID="a9cb12f005c18cefa1f8d06258958a47cd9dbae77b07b9321f3a63b0c2201f97" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.251933 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9cb12f005c18cefa1f8d06258958a47cd9dbae77b07b9321f3a63b0c2201f97"} err="failed to get container status \"a9cb12f005c18cefa1f8d06258958a47cd9dbae77b07b9321f3a63b0c2201f97\": rpc error: code = NotFound desc = could not find container 
\"a9cb12f005c18cefa1f8d06258958a47cd9dbae77b07b9321f3a63b0c2201f97\": container with ID starting with a9cb12f005c18cefa1f8d06258958a47cd9dbae77b07b9321f3a63b0c2201f97 not found: ID does not exist" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.251987 4703 scope.go:117] "RemoveContainer" containerID="e45e9f34f0a242323f995d20efb819515d38699870860e3d0cd21a497215d424" Jan 30 12:03:08 crc kubenswrapper[4703]: E0130 12:03:08.252694 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e45e9f34f0a242323f995d20efb819515d38699870860e3d0cd21a497215d424\": container with ID starting with e45e9f34f0a242323f995d20efb819515d38699870860e3d0cd21a497215d424 not found: ID does not exist" containerID="e45e9f34f0a242323f995d20efb819515d38699870860e3d0cd21a497215d424" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.252725 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e45e9f34f0a242323f995d20efb819515d38699870860e3d0cd21a497215d424"} err="failed to get container status \"e45e9f34f0a242323f995d20efb819515d38699870860e3d0cd21a497215d424\": rpc error: code = NotFound desc = could not find container \"e45e9f34f0a242323f995d20efb819515d38699870860e3d0cd21a497215d424\": container with ID starting with e45e9f34f0a242323f995d20efb819515d38699870860e3d0cd21a497215d424 not found: ID does not exist" Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.309683 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-8gf7w"] Jan 30 12:03:08 crc kubenswrapper[4703]: I0130 12:03:08.315990 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-8gf7w"] Jan 30 12:03:09 crc kubenswrapper[4703]: I0130 12:03:09.094133 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" path="/var/lib/kubelet/pods/2bfef4b4-9e99-4b51-bec9-2e6619cdbc63/volumes" Jan 30 12:03:09 crc kubenswrapper[4703]: I0130 12:03:09.095071 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" path="/var/lib/kubelet/pods/7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa/volumes" Jan 30 12:03:09 crc kubenswrapper[4703]: I0130 12:03:09.211585 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6"] Jan 30 12:03:09 crc kubenswrapper[4703]: I0130 12:03:09.359045 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xzz7n"] Jan 30 12:03:09 crc kubenswrapper[4703]: I0130 12:03:09.359422 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xzz7n" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" containerName="registry-server" containerID="cri-o://6e5e476cf7a0129cfa7252f7d13fff89d9f0cc2fd586ec06ae55e1644dbe055b" gracePeriod=2 Jan 30 12:03:09 crc kubenswrapper[4703]: I0130 12:03:09.998298 4703 generic.go:334] "Generic (PLEG): container finished" podID="61693ed0-e352-4c89-9076-be1acb1a0bfe" containerID="6e5e476cf7a0129cfa7252f7d13fff89d9f0cc2fd586ec06ae55e1644dbe055b" exitCode=0 Jan 30 12:03:09 crc kubenswrapper[4703]: I0130 12:03:09.999197 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" podUID="65a008e3-b446-48cb-8be5-d2ea94b456e2" 
containerName="route-controller-manager" containerID="cri-o://a58db7a8b53900cdabb2441a13de541629274b3baab9c92543ae23bc7030f14a" gracePeriod=30 Jan 30 12:03:09 crc kubenswrapper[4703]: I0130 12:03:09.999621 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xzz7n" event={"ID":"61693ed0-e352-4c89-9076-be1acb1a0bfe","Type":"ContainerDied","Data":"6e5e476cf7a0129cfa7252f7d13fff89d9f0cc2fd586ec06ae55e1644dbe055b"} Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.370776 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xzz7n" Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.416346 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61693ed0-e352-4c89-9076-be1acb1a0bfe-utilities\") pod \"61693ed0-e352-4c89-9076-be1acb1a0bfe\" (UID: \"61693ed0-e352-4c89-9076-be1acb1a0bfe\") " Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.416437 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p75v5\" (UniqueName: \"kubernetes.io/projected/61693ed0-e352-4c89-9076-be1acb1a0bfe-kube-api-access-p75v5\") pod \"61693ed0-e352-4c89-9076-be1acb1a0bfe\" (UID: \"61693ed0-e352-4c89-9076-be1acb1a0bfe\") " Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.416516 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61693ed0-e352-4c89-9076-be1acb1a0bfe-catalog-content\") pod \"61693ed0-e352-4c89-9076-be1acb1a0bfe\" (UID: \"61693ed0-e352-4c89-9076-be1acb1a0bfe\") " Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.418237 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61693ed0-e352-4c89-9076-be1acb1a0bfe-utilities" (OuterVolumeSpecName: "utilities") pod "61693ed0-e352-4c89-9076-be1acb1a0bfe" (UID: "61693ed0-e352-4c89-9076-be1acb1a0bfe"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.419532 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61693ed0-e352-4c89-9076-be1acb1a0bfe-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.443302 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61693ed0-e352-4c89-9076-be1acb1a0bfe-kube-api-access-p75v5" (OuterVolumeSpecName: "kube-api-access-p75v5") pod "61693ed0-e352-4c89-9076-be1acb1a0bfe" (UID: "61693ed0-e352-4c89-9076-be1acb1a0bfe"). InnerVolumeSpecName "kube-api-access-p75v5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.521702 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p75v5\" (UniqueName: \"kubernetes.io/projected/61693ed0-e352-4c89-9076-be1acb1a0bfe-kube-api-access-p75v5\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.563515 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.581180 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61693ed0-e352-4c89-9076-be1acb1a0bfe-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "61693ed0-e352-4c89-9076-be1acb1a0bfe" (UID: "61693ed0-e352-4c89-9076-be1acb1a0bfe"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.624562 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/65a008e3-b446-48cb-8be5-d2ea94b456e2-client-ca\") pod \"65a008e3-b446-48cb-8be5-d2ea94b456e2\" (UID: \"65a008e3-b446-48cb-8be5-d2ea94b456e2\") " Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.624633 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/65a008e3-b446-48cb-8be5-d2ea94b456e2-config\") pod \"65a008e3-b446-48cb-8be5-d2ea94b456e2\" (UID: \"65a008e3-b446-48cb-8be5-d2ea94b456e2\") " Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.624679 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/65a008e3-b446-48cb-8be5-d2ea94b456e2-serving-cert\") pod \"65a008e3-b446-48cb-8be5-d2ea94b456e2\" (UID: \"65a008e3-b446-48cb-8be5-d2ea94b456e2\") " Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.624798 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-snq7f\" (UniqueName: \"kubernetes.io/projected/65a008e3-b446-48cb-8be5-d2ea94b456e2-kube-api-access-snq7f\") pod \"65a008e3-b446-48cb-8be5-d2ea94b456e2\" (UID: \"65a008e3-b446-48cb-8be5-d2ea94b456e2\") " Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.625438 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65a008e3-b446-48cb-8be5-d2ea94b456e2-client-ca" (OuterVolumeSpecName: "client-ca") pod "65a008e3-b446-48cb-8be5-d2ea94b456e2" (UID: "65a008e3-b446-48cb-8be5-d2ea94b456e2"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.625621 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65a008e3-b446-48cb-8be5-d2ea94b456e2-config" (OuterVolumeSpecName: "config") pod "65a008e3-b446-48cb-8be5-d2ea94b456e2" (UID: "65a008e3-b446-48cb-8be5-d2ea94b456e2"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.626480 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61693ed0-e352-4c89-9076-be1acb1a0bfe-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.626506 4703 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/65a008e3-b446-48cb-8be5-d2ea94b456e2-client-ca\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.626519 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/65a008e3-b446-48cb-8be5-d2ea94b456e2-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.630153 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65a008e3-b446-48cb-8be5-d2ea94b456e2-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "65a008e3-b446-48cb-8be5-d2ea94b456e2" (UID: "65a008e3-b446-48cb-8be5-d2ea94b456e2"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.630255 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65a008e3-b446-48cb-8be5-d2ea94b456e2-kube-api-access-snq7f" (OuterVolumeSpecName: "kube-api-access-snq7f") pod "65a008e3-b446-48cb-8be5-d2ea94b456e2" (UID: "65a008e3-b446-48cb-8be5-d2ea94b456e2"). InnerVolumeSpecName "kube-api-access-snq7f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.728771 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-snq7f\" (UniqueName: \"kubernetes.io/projected/65a008e3-b446-48cb-8be5-d2ea94b456e2-kube-api-access-snq7f\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:10 crc kubenswrapper[4703]: I0130 12:03:10.728826 4703 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/65a008e3-b446-48cb-8be5-d2ea94b456e2-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:11 crc kubenswrapper[4703]: I0130 12:03:11.008254 4703 generic.go:334] "Generic (PLEG): container finished" podID="65a008e3-b446-48cb-8be5-d2ea94b456e2" containerID="a58db7a8b53900cdabb2441a13de541629274b3baab9c92543ae23bc7030f14a" exitCode=0 Jan 30 12:03:11 crc kubenswrapper[4703]: I0130 12:03:11.008349 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" event={"ID":"65a008e3-b446-48cb-8be5-d2ea94b456e2","Type":"ContainerDied","Data":"a58db7a8b53900cdabb2441a13de541629274b3baab9c92543ae23bc7030f14a"} Jan 30 12:03:11 crc kubenswrapper[4703]: I0130 12:03:11.008396 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" event={"ID":"65a008e3-b446-48cb-8be5-d2ea94b456e2","Type":"ContainerDied","Data":"b18589a4405172b2627d61ae00c3114a2fb9b07c5f8485a37c0b40324de2d752"} Jan 30 12:03:11 crc kubenswrapper[4703]: I0130 12:03:11.008440 4703 scope.go:117] "RemoveContainer" containerID="a58db7a8b53900cdabb2441a13de541629274b3baab9c92543ae23bc7030f14a" Jan 30 12:03:11 crc kubenswrapper[4703]: I0130 12:03:11.008594 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6" Jan 30 12:03:11 crc kubenswrapper[4703]: I0130 12:03:11.016941 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xzz7n" event={"ID":"61693ed0-e352-4c89-9076-be1acb1a0bfe","Type":"ContainerDied","Data":"f5f689484cfb75969390d075c7b5bb0ef3d1e662582c9b78ecbd77839999d1be"} Jan 30 12:03:11 crc kubenswrapper[4703]: I0130 12:03:11.017243 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xzz7n" Jan 30 12:03:11 crc kubenswrapper[4703]: I0130 12:03:11.049626 4703 scope.go:117] "RemoveContainer" containerID="a58db7a8b53900cdabb2441a13de541629274b3baab9c92543ae23bc7030f14a" Jan 30 12:03:11 crc kubenswrapper[4703]: E0130 12:03:11.051388 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a58db7a8b53900cdabb2441a13de541629274b3baab9c92543ae23bc7030f14a\": container with ID starting with a58db7a8b53900cdabb2441a13de541629274b3baab9c92543ae23bc7030f14a not found: ID does not exist" containerID="a58db7a8b53900cdabb2441a13de541629274b3baab9c92543ae23bc7030f14a" Jan 30 12:03:11 crc kubenswrapper[4703]: I0130 12:03:11.051460 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a58db7a8b53900cdabb2441a13de541629274b3baab9c92543ae23bc7030f14a"} err="failed to get container status \"a58db7a8b53900cdabb2441a13de541629274b3baab9c92543ae23bc7030f14a\": rpc error: code = NotFound desc = could not find container \"a58db7a8b53900cdabb2441a13de541629274b3baab9c92543ae23bc7030f14a\": container with ID starting with a58db7a8b53900cdabb2441a13de541629274b3baab9c92543ae23bc7030f14a not found: ID does not exist" Jan 30 12:03:11 crc kubenswrapper[4703]: I0130 12:03:11.051538 4703 scope.go:117] "RemoveContainer" containerID="6e5e476cf7a0129cfa7252f7d13fff89d9f0cc2fd586ec06ae55e1644dbe055b" Jan 30 12:03:11 crc kubenswrapper[4703]: I0130 12:03:11.080688 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xzz7n"] Jan 30 12:03:11 crc kubenswrapper[4703]: I0130 12:03:11.089597 4703 scope.go:117] "RemoveContainer" containerID="dc891aede133308f7a05c165f64c0f92eb01e9fd7bda3d7232bb3ac9d9fc896b" Jan 30 12:03:11 crc kubenswrapper[4703]: I0130 12:03:11.118290 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xzz7n"] Jan 30 12:03:11 crc kubenswrapper[4703]: I0130 12:03:11.138994 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6"] Jan 30 12:03:11 crc kubenswrapper[4703]: I0130 12:03:11.141532 4703 scope.go:117] "RemoveContainer" containerID="818e0bdb0de312f7e96185a7d002cbcdf59cecc454140511cb6e3890d0b8ea75" Jan 30 12:03:11 crc kubenswrapper[4703]: I0130 12:03:11.146077 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7fdf5f4875-7wtr6"] Jan 30 12:03:12 crc kubenswrapper[4703]: I0130 12:03:12.823456 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:03:12 crc kubenswrapper[4703]: I0130 
12:03:12.824150 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.096577 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" path="/var/lib/kubelet/pods/61693ed0-e352-4c89-9076-be1acb1a0bfe/volumes" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.097485 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65a008e3-b446-48cb-8be5-d2ea94b456e2" path="/var/lib/kubelet/pods/65a008e3-b446-48cb-8be5-d2ea94b456e2/volumes" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.276777 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr"] Jan 30 12:03:13 crc kubenswrapper[4703]: E0130 12:03:13.277159 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" containerName="extract-utilities" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.277180 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" containerName="extract-utilities" Jan 30 12:03:13 crc kubenswrapper[4703]: E0130 12:03:13.277197 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" containerName="extract-content" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.277206 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" containerName="extract-content" Jan 30 12:03:13 crc kubenswrapper[4703]: E0130 12:03:13.277215 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" containerName="registry-server" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.277224 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" containerName="registry-server" Jan 30 12:03:13 crc kubenswrapper[4703]: E0130 12:03:13.277239 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" containerName="extract-content" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.277249 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" containerName="extract-content" Jan 30 12:03:13 crc kubenswrapper[4703]: E0130 12:03:13.277265 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" containerName="registry-server" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.277274 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" containerName="registry-server" Jan 30 12:03:13 crc kubenswrapper[4703]: E0130 12:03:13.277283 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" containerName="registry-server" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.277292 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" containerName="registry-server" Jan 30 12:03:13 crc kubenswrapper[4703]: E0130 12:03:13.277309 4703 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" containerName="extract-utilities" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.277320 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" containerName="extract-utilities" Jan 30 12:03:13 crc kubenswrapper[4703]: E0130 12:03:13.277332 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65a008e3-b446-48cb-8be5-d2ea94b456e2" containerName="route-controller-manager" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.277340 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="65a008e3-b446-48cb-8be5-d2ea94b456e2" containerName="route-controller-manager" Jan 30 12:03:13 crc kubenswrapper[4703]: E0130 12:03:13.277350 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" containerName="extract-content" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.277358 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" containerName="extract-content" Jan 30 12:03:13 crc kubenswrapper[4703]: E0130 12:03:13.277368 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" containerName="extract-utilities" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.277377 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" containerName="extract-utilities" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.277536 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f49bb47-b2fe-4b1b-a473-a5c25c5d99fa" containerName="registry-server" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.277554 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="61693ed0-e352-4c89-9076-be1acb1a0bfe" containerName="registry-server" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.277567 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="65a008e3-b446-48cb-8be5-d2ea94b456e2" containerName="route-controller-manager" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.277585 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bfef4b4-9e99-4b51-bec9-2e6619cdbc63" containerName="registry-server" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.278257 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.281844 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.281925 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.281863 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.282245 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.282623 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.285312 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.302620 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr"] Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.372600 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-dk9mj" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.416340 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d36ae37-fe06-4368-a4c2-40e716e6b9c7-config\") pod \"route-controller-manager-869599d4f6-c9jqr\" (UID: \"5d36ae37-fe06-4368-a4c2-40e716e6b9c7\") " pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.416428 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5d36ae37-fe06-4368-a4c2-40e716e6b9c7-client-ca\") pod \"route-controller-manager-869599d4f6-c9jqr\" (UID: \"5d36ae37-fe06-4368-a4c2-40e716e6b9c7\") " pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.416517 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5d36ae37-fe06-4368-a4c2-40e716e6b9c7-serving-cert\") pod \"route-controller-manager-869599d4f6-c9jqr\" (UID: \"5d36ae37-fe06-4368-a4c2-40e716e6b9c7\") " pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.416642 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjwjq\" (UniqueName: \"kubernetes.io/projected/5d36ae37-fe06-4368-a4c2-40e716e6b9c7-kube-api-access-tjwjq\") pod \"route-controller-manager-869599d4f6-c9jqr\" (UID: \"5d36ae37-fe06-4368-a4c2-40e716e6b9c7\") " pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.491219 4703 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2r4b9"] Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.518744 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5d36ae37-fe06-4368-a4c2-40e716e6b9c7-serving-cert\") pod \"route-controller-manager-869599d4f6-c9jqr\" (UID: \"5d36ae37-fe06-4368-a4c2-40e716e6b9c7\") " pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.518853 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjwjq\" (UniqueName: \"kubernetes.io/projected/5d36ae37-fe06-4368-a4c2-40e716e6b9c7-kube-api-access-tjwjq\") pod \"route-controller-manager-869599d4f6-c9jqr\" (UID: \"5d36ae37-fe06-4368-a4c2-40e716e6b9c7\") " pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.518992 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d36ae37-fe06-4368-a4c2-40e716e6b9c7-config\") pod \"route-controller-manager-869599d4f6-c9jqr\" (UID: \"5d36ae37-fe06-4368-a4c2-40e716e6b9c7\") " pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.519048 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5d36ae37-fe06-4368-a4c2-40e716e6b9c7-client-ca\") pod \"route-controller-manager-869599d4f6-c9jqr\" (UID: \"5d36ae37-fe06-4368-a4c2-40e716e6b9c7\") " pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.521346 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5d36ae37-fe06-4368-a4c2-40e716e6b9c7-config\") pod \"route-controller-manager-869599d4f6-c9jqr\" (UID: \"5d36ae37-fe06-4368-a4c2-40e716e6b9c7\") " pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.523373 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5d36ae37-fe06-4368-a4c2-40e716e6b9c7-client-ca\") pod \"route-controller-manager-869599d4f6-c9jqr\" (UID: \"5d36ae37-fe06-4368-a4c2-40e716e6b9c7\") " pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.544847 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjwjq\" (UniqueName: \"kubernetes.io/projected/5d36ae37-fe06-4368-a4c2-40e716e6b9c7-kube-api-access-tjwjq\") pod \"route-controller-manager-869599d4f6-c9jqr\" (UID: \"5d36ae37-fe06-4368-a4c2-40e716e6b9c7\") " pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" Jan 30 12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.555546 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5d36ae37-fe06-4368-a4c2-40e716e6b9c7-serving-cert\") pod \"route-controller-manager-869599d4f6-c9jqr\" (UID: \"5d36ae37-fe06-4368-a4c2-40e716e6b9c7\") " pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" Jan 30 
12:03:13 crc kubenswrapper[4703]: I0130 12:03:13.601012 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" Jan 30 12:03:14 crc kubenswrapper[4703]: I0130 12:03:14.056540 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr"] Jan 30 12:03:15 crc kubenswrapper[4703]: I0130 12:03:15.052554 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" event={"ID":"5d36ae37-fe06-4368-a4c2-40e716e6b9c7","Type":"ContainerStarted","Data":"fef16de131ae9f134df614cb3ab1874dbfbfcfea49ab494848e91bc7d4f727bf"} Jan 30 12:03:15 crc kubenswrapper[4703]: I0130 12:03:15.053983 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" Jan 30 12:03:15 crc kubenswrapper[4703]: I0130 12:03:15.054016 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" event={"ID":"5d36ae37-fe06-4368-a4c2-40e716e6b9c7","Type":"ContainerStarted","Data":"54ee98a668cfdbf2b7e3986c72ba186f53f803272cff98a10c6a34a6232a7c7a"} Jan 30 12:03:15 crc kubenswrapper[4703]: I0130 12:03:15.066238 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" Jan 30 12:03:15 crc kubenswrapper[4703]: I0130 12:03:15.101996 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-869599d4f6-c9jqr" podStartSLOduration=6.101961287 podStartE2EDuration="6.101961287s" podCreationTimestamp="2026-01-30 12:03:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:03:15.084970247 +0000 UTC m=+430.862791911" watchObservedRunningTime="2026-01-30 12:03:15.101961287 +0000 UTC m=+430.879782961" Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.641784 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rsvj7"] Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.653697 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-rsvj7" podUID="11176772-9170-499f-8fec-e460709fd300" containerName="registry-server" containerID="cri-o://a5a7f2010f118c76418a0a48c6bf2f8ec31fb24a941be46c547f68004ee5bfd3" gracePeriod=30 Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.682305 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-988gc"] Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.683517 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-988gc" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" containerName="registry-server" containerID="cri-o://8691c8a86fadd5c5538c351f9612fb44f9fe4406c93e911ea9c3a147c3f582fe" gracePeriod=30 Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.691291 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wd7g9"] Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.691715 4703 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" podUID="26730f5b-96f6-40f6-ab66-0500a306f988" containerName="marketplace-operator" containerID="cri-o://faa23bfdc48b30d1d1dd1a23b9a6735ff7b13b8a16d2d63ba105d51c85c0d076" gracePeriod=30 Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.699334 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-m9qpx"] Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.699968 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-m9qpx" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" containerName="registry-server" containerID="cri-o://93821994e6d9e59d41559be2ba22c413f31d7797677c1b229d528b1ea61ab79a" gracePeriod=30 Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.702463 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2wfxz"] Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.709789 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2wfxz" Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.717085 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/f0dd4153-47cd-40a1-b929-69ecba1b33f4-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2wfxz\" (UID: \"f0dd4153-47cd-40a1-b929-69ecba1b33f4\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wfxz" Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.717210 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f0dd4153-47cd-40a1-b929-69ecba1b33f4-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2wfxz\" (UID: \"f0dd4153-47cd-40a1-b929-69ecba1b33f4\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wfxz" Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.717281 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9br2\" (UniqueName: \"kubernetes.io/projected/f0dd4153-47cd-40a1-b929-69ecba1b33f4-kube-api-access-d9br2\") pod \"marketplace-operator-79b997595-2wfxz\" (UID: \"f0dd4153-47cd-40a1-b929-69ecba1b33f4\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wfxz" Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.726394 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ngspg"] Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.726865 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ngspg" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" containerName="registry-server" containerID="cri-o://3ad1813ed1716fa3c61d57cb0eb17f0f73685562b8aa6e0a68ceeb80bdb4f3d7" gracePeriod=30 Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.731358 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2wfxz"] Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.820364 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/f0dd4153-47cd-40a1-b929-69ecba1b33f4-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2wfxz\" (UID: \"f0dd4153-47cd-40a1-b929-69ecba1b33f4\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wfxz" Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.821201 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f0dd4153-47cd-40a1-b929-69ecba1b33f4-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2wfxz\" (UID: \"f0dd4153-47cd-40a1-b929-69ecba1b33f4\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wfxz" Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.821311 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9br2\" (UniqueName: \"kubernetes.io/projected/f0dd4153-47cd-40a1-b929-69ecba1b33f4-kube-api-access-d9br2\") pod \"marketplace-operator-79b997595-2wfxz\" (UID: \"f0dd4153-47cd-40a1-b929-69ecba1b33f4\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wfxz" Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.824574 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/f0dd4153-47cd-40a1-b929-69ecba1b33f4-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-2wfxz\" (UID: \"f0dd4153-47cd-40a1-b929-69ecba1b33f4\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wfxz" Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.838328 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/f0dd4153-47cd-40a1-b929-69ecba1b33f4-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-2wfxz\" (UID: \"f0dd4153-47cd-40a1-b929-69ecba1b33f4\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wfxz" Jan 30 12:03:22 crc kubenswrapper[4703]: I0130 12:03:22.851858 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9br2\" (UniqueName: \"kubernetes.io/projected/f0dd4153-47cd-40a1-b929-69ecba1b33f4-kube-api-access-d9br2\") pod \"marketplace-operator-79b997595-2wfxz\" (UID: \"f0dd4153-47cd-40a1-b929-69ecba1b33f4\") " pod="openshift-marketplace/marketplace-operator-79b997595-2wfxz" Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.046962 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-2wfxz" Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.123962 4703 generic.go:334] "Generic (PLEG): container finished" podID="f49515e4-f9bb-4741-a979-5d59fbc7198d" containerID="93821994e6d9e59d41559be2ba22c413f31d7797677c1b229d528b1ea61ab79a" exitCode=0 Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.124088 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m9qpx" event={"ID":"f49515e4-f9bb-4741-a979-5d59fbc7198d","Type":"ContainerDied","Data":"93821994e6d9e59d41559be2ba22c413f31d7797677c1b229d528b1ea61ab79a"} Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.126563 4703 generic.go:334] "Generic (PLEG): container finished" podID="26730f5b-96f6-40f6-ab66-0500a306f988" containerID="faa23bfdc48b30d1d1dd1a23b9a6735ff7b13b8a16d2d63ba105d51c85c0d076" exitCode=0 Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.126627 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" event={"ID":"26730f5b-96f6-40f6-ab66-0500a306f988","Type":"ContainerDied","Data":"faa23bfdc48b30d1d1dd1a23b9a6735ff7b13b8a16d2d63ba105d51c85c0d076"} Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.131668 4703 generic.go:334] "Generic (PLEG): container finished" podID="4e4915d0-912f-426c-9d74-3d42e36678ed" containerID="8691c8a86fadd5c5538c351f9612fb44f9fe4406c93e911ea9c3a147c3f582fe" exitCode=0 Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.131738 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-988gc" event={"ID":"4e4915d0-912f-426c-9d74-3d42e36678ed","Type":"ContainerDied","Data":"8691c8a86fadd5c5538c351f9612fb44f9fe4406c93e911ea9c3a147c3f582fe"} Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.137987 4703 generic.go:334] "Generic (PLEG): container finished" podID="11176772-9170-499f-8fec-e460709fd300" containerID="a5a7f2010f118c76418a0a48c6bf2f8ec31fb24a941be46c547f68004ee5bfd3" exitCode=0 Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.138062 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rsvj7" event={"ID":"11176772-9170-499f-8fec-e460709fd300","Type":"ContainerDied","Data":"a5a7f2010f118c76418a0a48c6bf2f8ec31fb24a941be46c547f68004ee5bfd3"} Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.436144 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rsvj7" Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.641287 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4tnmw\" (UniqueName: \"kubernetes.io/projected/11176772-9170-499f-8fec-e460709fd300-kube-api-access-4tnmw\") pod \"11176772-9170-499f-8fec-e460709fd300\" (UID: \"11176772-9170-499f-8fec-e460709fd300\") " Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.641578 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11176772-9170-499f-8fec-e460709fd300-catalog-content\") pod \"11176772-9170-499f-8fec-e460709fd300\" (UID: \"11176772-9170-499f-8fec-e460709fd300\") " Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.641696 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11176772-9170-499f-8fec-e460709fd300-utilities\") pod \"11176772-9170-499f-8fec-e460709fd300\" (UID: \"11176772-9170-499f-8fec-e460709fd300\") " Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.644529 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/11176772-9170-499f-8fec-e460709fd300-utilities" (OuterVolumeSpecName: "utilities") pod "11176772-9170-499f-8fec-e460709fd300" (UID: "11176772-9170-499f-8fec-e460709fd300"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.721298 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/11176772-9170-499f-8fec-e460709fd300-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "11176772-9170-499f-8fec-e460709fd300" (UID: "11176772-9170-499f-8fec-e460709fd300"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.733724 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11176772-9170-499f-8fec-e460709fd300-kube-api-access-4tnmw" (OuterVolumeSpecName: "kube-api-access-4tnmw") pod "11176772-9170-499f-8fec-e460709fd300" (UID: "11176772-9170-499f-8fec-e460709fd300"). InnerVolumeSpecName "kube-api-access-4tnmw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.745369 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4tnmw\" (UniqueName: \"kubernetes.io/projected/11176772-9170-499f-8fec-e460709fd300-kube-api-access-4tnmw\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.745419 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/11176772-9170-499f-8fec-e460709fd300-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.745434 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/11176772-9170-499f-8fec-e460709fd300-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.761186 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-988gc" Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.952321 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e4915d0-912f-426c-9d74-3d42e36678ed-catalog-content\") pod \"4e4915d0-912f-426c-9d74-3d42e36678ed\" (UID: \"4e4915d0-912f-426c-9d74-3d42e36678ed\") " Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.952464 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cdwdv\" (UniqueName: \"kubernetes.io/projected/4e4915d0-912f-426c-9d74-3d42e36678ed-kube-api-access-cdwdv\") pod \"4e4915d0-912f-426c-9d74-3d42e36678ed\" (UID: \"4e4915d0-912f-426c-9d74-3d42e36678ed\") " Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.952729 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e4915d0-912f-426c-9d74-3d42e36678ed-utilities\") pod \"4e4915d0-912f-426c-9d74-3d42e36678ed\" (UID: \"4e4915d0-912f-426c-9d74-3d42e36678ed\") " Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.955147 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e4915d0-912f-426c-9d74-3d42e36678ed-utilities" (OuterVolumeSpecName: "utilities") pod "4e4915d0-912f-426c-9d74-3d42e36678ed" (UID: "4e4915d0-912f-426c-9d74-3d42e36678ed"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:03:23 crc kubenswrapper[4703]: I0130 12:03:23.974540 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e4915d0-912f-426c-9d74-3d42e36678ed-kube-api-access-cdwdv" (OuterVolumeSpecName: "kube-api-access-cdwdv") pod "4e4915d0-912f-426c-9d74-3d42e36678ed" (UID: "4e4915d0-912f-426c-9d74-3d42e36678ed"). InnerVolumeSpecName "kube-api-access-cdwdv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.056739 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e4915d0-912f-426c-9d74-3d42e36678ed-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.057551 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdwdv\" (UniqueName: \"kubernetes.io/projected/4e4915d0-912f-426c-9d74-3d42e36678ed-kube-api-access-cdwdv\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.064281 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e4915d0-912f-426c-9d74-3d42e36678ed-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4e4915d0-912f-426c-9d74-3d42e36678ed" (UID: "4e4915d0-912f-426c-9d74-3d42e36678ed"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.094416 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-2wfxz"] Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.150965 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m9qpx" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.155051 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-988gc" event={"ID":"4e4915d0-912f-426c-9d74-3d42e36678ed","Type":"ContainerDied","Data":"1822eba416a2314ed01f7393600216cfef79070903e948de31cac84a88df5bc8"} Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.155160 4703 scope.go:117] "RemoveContainer" containerID="8691c8a86fadd5c5538c351f9612fb44f9fe4406c93e911ea9c3a147c3f582fe" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.156617 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-988gc" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.158375 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rsvj7" event={"ID":"11176772-9170-499f-8fec-e460709fd300","Type":"ContainerDied","Data":"ff8a7b163e69eb61ab5953daa2df4630cd0f23723ab56084e54382f8639e0f0f"} Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.158580 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rsvj7" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.159202 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e4915d0-912f-426c-9d74-3d42e36678ed-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.162334 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.164979 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-m9qpx" event={"ID":"f49515e4-f9bb-4741-a979-5d59fbc7198d","Type":"ContainerDied","Data":"af4e235e0a135e9a9db7394ee3f358f8e2b4b67ee4943edd9c52e77e90423d5e"} Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.165164 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-m9qpx" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.167872 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2wfxz" event={"ID":"f0dd4153-47cd-40a1-b929-69ecba1b33f4","Type":"ContainerStarted","Data":"c0745f61f46103eaf0578171911efa4294a71d6e65ee8c33c84224bc08a8a847"} Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.171608 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" event={"ID":"26730f5b-96f6-40f6-ab66-0500a306f988","Type":"ContainerDied","Data":"ad84e63cc0a4b9de60398d3593e6e2350d5da667e18bfd16cc613919d070fbd3"} Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.171680 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wd7g9" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.301823 4703 scope.go:117] "RemoveContainer" containerID="041b55e84c7d7db02d9b79578766cbf5fd25f51b1427e3ea9adb7faaa663b309" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.311552 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xnjdm\" (UniqueName: \"kubernetes.io/projected/f49515e4-f9bb-4741-a979-5d59fbc7198d-kube-api-access-xnjdm\") pod \"f49515e4-f9bb-4741-a979-5d59fbc7198d\" (UID: \"f49515e4-f9bb-4741-a979-5d59fbc7198d\") " Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.312604 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f49515e4-f9bb-4741-a979-5d59fbc7198d-utilities\") pod \"f49515e4-f9bb-4741-a979-5d59fbc7198d\" (UID: \"f49515e4-f9bb-4741-a979-5d59fbc7198d\") " Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.314250 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/26730f5b-96f6-40f6-ab66-0500a306f988-marketplace-trusted-ca\") pod \"26730f5b-96f6-40f6-ab66-0500a306f988\" (UID: \"26730f5b-96f6-40f6-ab66-0500a306f988\") " Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.314941 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f49515e4-f9bb-4741-a979-5d59fbc7198d-catalog-content\") pod \"f49515e4-f9bb-4741-a979-5d59fbc7198d\" (UID: \"f49515e4-f9bb-4741-a979-5d59fbc7198d\") " Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.317644 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/26730f5b-96f6-40f6-ab66-0500a306f988-marketplace-operator-metrics\") pod \"26730f5b-96f6-40f6-ab66-0500a306f988\" (UID: \"26730f5b-96f6-40f6-ab66-0500a306f988\") " Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.319441 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f49515e4-f9bb-4741-a979-5d59fbc7198d-utilities" (OuterVolumeSpecName: "utilities") pod "f49515e4-f9bb-4741-a979-5d59fbc7198d" (UID: "f49515e4-f9bb-4741-a979-5d59fbc7198d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.322334 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26730f5b-96f6-40f6-ab66-0500a306f988-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "26730f5b-96f6-40f6-ab66-0500a306f988" (UID: "26730f5b-96f6-40f6-ab66-0500a306f988"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.326499 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f49515e4-f9bb-4741-a979-5d59fbc7198d-kube-api-access-xnjdm" (OuterVolumeSpecName: "kube-api-access-xnjdm") pod "f49515e4-f9bb-4741-a979-5d59fbc7198d" (UID: "f49515e4-f9bb-4741-a979-5d59fbc7198d"). InnerVolumeSpecName "kube-api-access-xnjdm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.326537 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f49515e4-f9bb-4741-a979-5d59fbc7198d-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.326570 4703 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/26730f5b-96f6-40f6-ab66-0500a306f988-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.327288 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26730f5b-96f6-40f6-ab66-0500a306f988-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "26730f5b-96f6-40f6-ab66-0500a306f988" (UID: "26730f5b-96f6-40f6-ab66-0500a306f988"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.353837 4703 scope.go:117] "RemoveContainer" containerID="971c4ad73139720930d3ef27b04952062e9b9b1621fc36d165f152d0f949cf83" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.381056 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f49515e4-f9bb-4741-a979-5d59fbc7198d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f49515e4-f9bb-4741-a979-5d59fbc7198d" (UID: "f49515e4-f9bb-4741-a979-5d59fbc7198d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.382176 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-988gc"] Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.387513 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-988gc"] Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.396722 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rsvj7"] Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.398700 4703 scope.go:117] "RemoveContainer" containerID="a5a7f2010f118c76418a0a48c6bf2f8ec31fb24a941be46c547f68004ee5bfd3" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.402558 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-rsvj7"] Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.424792 4703 scope.go:117] "RemoveContainer" containerID="5a00c050e997338af2456848c7bbee7ddd83a616324a9054007734a834f5d68b" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.428691 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rcjr8\" (UniqueName: \"kubernetes.io/projected/26730f5b-96f6-40f6-ab66-0500a306f988-kube-api-access-rcjr8\") pod \"26730f5b-96f6-40f6-ab66-0500a306f988\" (UID: \"26730f5b-96f6-40f6-ab66-0500a306f988\") " Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.429432 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xnjdm\" (UniqueName: \"kubernetes.io/projected/f49515e4-f9bb-4741-a979-5d59fbc7198d-kube-api-access-xnjdm\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.429672 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/f49515e4-f9bb-4741-a979-5d59fbc7198d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.429759 4703 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/26730f5b-96f6-40f6-ab66-0500a306f988-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.434538 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26730f5b-96f6-40f6-ab66-0500a306f988-kube-api-access-rcjr8" (OuterVolumeSpecName: "kube-api-access-rcjr8") pod "26730f5b-96f6-40f6-ab66-0500a306f988" (UID: "26730f5b-96f6-40f6-ab66-0500a306f988"). InnerVolumeSpecName "kube-api-access-rcjr8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.465675 4703 scope.go:117] "RemoveContainer" containerID="f293446aff3a8701046f8d6de7eb394971e96b034800138e9c74d7b165f5bf88" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.484985 4703 scope.go:117] "RemoveContainer" containerID="93821994e6d9e59d41559be2ba22c413f31d7797677c1b229d528b1ea61ab79a" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.508046 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-m9qpx"] Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.512104 4703 scope.go:117] "RemoveContainer" containerID="8677f7b9085b8b9af6a49b39e0813aab117bab21083d4e079489139dd839f441" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.512818 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-m9qpx"] Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.519292 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wd7g9"] Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.526038 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wd7g9"] Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.531296 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rcjr8\" (UniqueName: \"kubernetes.io/projected/26730f5b-96f6-40f6-ab66-0500a306f988-kube-api-access-rcjr8\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.533356 4703 scope.go:117] "RemoveContainer" containerID="47a32128d7e654c4f7187e443fdc88a0be97522a72fc7542b62b4ab4f34f248a" Jan 30 12:03:24 crc kubenswrapper[4703]: I0130 12:03:24.552877 4703 scope.go:117] "RemoveContainer" containerID="faa23bfdc48b30d1d1dd1a23b9a6735ff7b13b8a16d2d63ba105d51c85c0d076" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.099006 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11176772-9170-499f-8fec-e460709fd300" path="/var/lib/kubelet/pods/11176772-9170-499f-8fec-e460709fd300/volumes" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.100312 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26730f5b-96f6-40f6-ab66-0500a306f988" path="/var/lib/kubelet/pods/26730f5b-96f6-40f6-ab66-0500a306f988/volumes" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.100880 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" path="/var/lib/kubelet/pods/4e4915d0-912f-426c-9d74-3d42e36678ed/volumes" Jan 30 12:03:25 crc kubenswrapper[4703]: 
I0130 12:03:25.102380 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" path="/var/lib/kubelet/pods/f49515e4-f9bb-4741-a979-5d59fbc7198d/volumes" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.190312 4703 generic.go:334] "Generic (PLEG): container finished" podID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" containerID="3ad1813ed1716fa3c61d57cb0eb17f0f73685562b8aa6e0a68ceeb80bdb4f3d7" exitCode=0 Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.190402 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ngspg" event={"ID":"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436","Type":"ContainerDied","Data":"3ad1813ed1716fa3c61d57cb0eb17f0f73685562b8aa6e0a68ceeb80bdb4f3d7"} Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.194363 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-2wfxz" event={"ID":"f0dd4153-47cd-40a1-b929-69ecba1b33f4","Type":"ContainerStarted","Data":"ec2574125fb01703240c19ef4d9f0157766555d39de29c14f1a1a8ed70f5a732"} Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.196382 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-2wfxz" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.224112 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-2wfxz" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.235566 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-2wfxz" podStartSLOduration=3.235534586 podStartE2EDuration="3.235534586s" podCreationTimestamp="2026-01-30 12:03:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:03:25.231678535 +0000 UTC m=+441.009500189" watchObservedRunningTime="2026-01-30 12:03:25.235534586 +0000 UTC m=+441.013356240" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.281999 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ngspg" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.347417 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8gvnv\" (UniqueName: \"kubernetes.io/projected/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436-kube-api-access-8gvnv\") pod \"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436\" (UID: \"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436\") " Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.348355 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436-catalog-content\") pod \"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436\" (UID: \"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436\") " Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.348445 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436-utilities\") pod \"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436\" (UID: \"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436\") " Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.349774 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436-utilities" (OuterVolumeSpecName: "utilities") pod "b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" (UID: "b8c48ae5-5f36-4ab5-b6a8-30e51be6d436"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.354578 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436-kube-api-access-8gvnv" (OuterVolumeSpecName: "kube-api-access-8gvnv") pod "b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" (UID: "b8c48ae5-5f36-4ab5-b6a8-30e51be6d436"). InnerVolumeSpecName "kube-api-access-8gvnv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.377591 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5v2j7"] Jan 30 12:03:25 crc kubenswrapper[4703]: E0130 12:03:25.377944 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11176772-9170-499f-8fec-e460709fd300" containerName="extract-utilities" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.377963 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="11176772-9170-499f-8fec-e460709fd300" containerName="extract-utilities" Jan 30 12:03:25 crc kubenswrapper[4703]: E0130 12:03:25.377986 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" containerName="registry-server" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.377996 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" containerName="registry-server" Jan 30 12:03:25 crc kubenswrapper[4703]: E0130 12:03:25.378007 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11176772-9170-499f-8fec-e460709fd300" containerName="extract-content" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.378018 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="11176772-9170-499f-8fec-e460709fd300" containerName="extract-content" Jan 30 12:03:25 crc kubenswrapper[4703]: E0130 12:03:25.378036 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26730f5b-96f6-40f6-ab66-0500a306f988" containerName="marketplace-operator" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.378044 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="26730f5b-96f6-40f6-ab66-0500a306f988" containerName="marketplace-operator" Jan 30 12:03:25 crc kubenswrapper[4703]: E0130 12:03:25.378061 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" containerName="extract-content" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.378070 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" containerName="extract-content" Jan 30 12:03:25 crc kubenswrapper[4703]: E0130 12:03:25.378080 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" containerName="extract-content" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.378088 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" containerName="extract-content" Jan 30 12:03:25 crc kubenswrapper[4703]: E0130 12:03:25.378100 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11176772-9170-499f-8fec-e460709fd300" containerName="registry-server" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.378108 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="11176772-9170-499f-8fec-e460709fd300" containerName="registry-server" Jan 30 12:03:25 crc kubenswrapper[4703]: E0130 12:03:25.383088 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" containerName="registry-server" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.383167 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" containerName="registry-server" Jan 30 12:03:25 crc kubenswrapper[4703]: E0130 12:03:25.383202 4703 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" containerName="registry-server" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.383213 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" containerName="registry-server" Jan 30 12:03:25 crc kubenswrapper[4703]: E0130 12:03:25.383225 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" containerName="extract-utilities" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.383235 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" containerName="extract-utilities" Jan 30 12:03:25 crc kubenswrapper[4703]: E0130 12:03:25.383246 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" containerName="extract-utilities" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.383254 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" containerName="extract-utilities" Jan 30 12:03:25 crc kubenswrapper[4703]: E0130 12:03:25.383270 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" containerName="extract-utilities" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.383281 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" containerName="extract-utilities" Jan 30 12:03:25 crc kubenswrapper[4703]: E0130 12:03:25.383293 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" containerName="extract-content" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.383302 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" containerName="extract-content" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.383623 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="f49515e4-f9bb-4741-a979-5d59fbc7198d" containerName="registry-server" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.383638 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" containerName="registry-server" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.383661 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="26730f5b-96f6-40f6-ab66-0500a306f988" containerName="marketplace-operator" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.383675 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="11176772-9170-499f-8fec-e460709fd300" containerName="registry-server" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.383687 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e4915d0-912f-426c-9d74-3d42e36678ed" containerName="registry-server" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.401968 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5v2j7" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.403344 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5v2j7"] Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.404764 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.453471 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0786e743-c8c4-45c5-abbe-197d54d908f6-catalog-content\") pod \"redhat-marketplace-5v2j7\" (UID: \"0786e743-c8c4-45c5-abbe-197d54d908f6\") " pod="openshift-marketplace/redhat-marketplace-5v2j7" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.454492 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hsdj\" (UniqueName: \"kubernetes.io/projected/0786e743-c8c4-45c5-abbe-197d54d908f6-kube-api-access-7hsdj\") pod \"redhat-marketplace-5v2j7\" (UID: \"0786e743-c8c4-45c5-abbe-197d54d908f6\") " pod="openshift-marketplace/redhat-marketplace-5v2j7" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.454935 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0786e743-c8c4-45c5-abbe-197d54d908f6-utilities\") pod \"redhat-marketplace-5v2j7\" (UID: \"0786e743-c8c4-45c5-abbe-197d54d908f6\") " pod="openshift-marketplace/redhat-marketplace-5v2j7" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.455778 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8gvnv\" (UniqueName: \"kubernetes.io/projected/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436-kube-api-access-8gvnv\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.455934 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.537939 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" (UID: "b8c48ae5-5f36-4ab5-b6a8-30e51be6d436"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.560482 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0786e743-c8c4-45c5-abbe-197d54d908f6-catalog-content\") pod \"redhat-marketplace-5v2j7\" (UID: \"0786e743-c8c4-45c5-abbe-197d54d908f6\") " pod="openshift-marketplace/redhat-marketplace-5v2j7" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.561166 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hsdj\" (UniqueName: \"kubernetes.io/projected/0786e743-c8c4-45c5-abbe-197d54d908f6-kube-api-access-7hsdj\") pod \"redhat-marketplace-5v2j7\" (UID: \"0786e743-c8c4-45c5-abbe-197d54d908f6\") " pod="openshift-marketplace/redhat-marketplace-5v2j7" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.561297 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0786e743-c8c4-45c5-abbe-197d54d908f6-utilities\") pod \"redhat-marketplace-5v2j7\" (UID: \"0786e743-c8c4-45c5-abbe-197d54d908f6\") " pod="openshift-marketplace/redhat-marketplace-5v2j7" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.561497 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.561503 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0786e743-c8c4-45c5-abbe-197d54d908f6-catalog-content\") pod \"redhat-marketplace-5v2j7\" (UID: \"0786e743-c8c4-45c5-abbe-197d54d908f6\") " pod="openshift-marketplace/redhat-marketplace-5v2j7" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.562378 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0786e743-c8c4-45c5-abbe-197d54d908f6-utilities\") pod \"redhat-marketplace-5v2j7\" (UID: \"0786e743-c8c4-45c5-abbe-197d54d908f6\") " pod="openshift-marketplace/redhat-marketplace-5v2j7" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.597426 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hsdj\" (UniqueName: \"kubernetes.io/projected/0786e743-c8c4-45c5-abbe-197d54d908f6-kube-api-access-7hsdj\") pod \"redhat-marketplace-5v2j7\" (UID: \"0786e743-c8c4-45c5-abbe-197d54d908f6\") " pod="openshift-marketplace/redhat-marketplace-5v2j7" Jan 30 12:03:25 crc kubenswrapper[4703]: I0130 12:03:25.739291 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5v2j7" Jan 30 12:03:26 crc kubenswrapper[4703]: I0130 12:03:26.213643 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ngspg" Jan 30 12:03:26 crc kubenswrapper[4703]: I0130 12:03:26.213594 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ngspg" event={"ID":"b8c48ae5-5f36-4ab5-b6a8-30e51be6d436","Type":"ContainerDied","Data":"4df821b456f302cb841bf3aefde2accd9138542c6b397085a6eea587f8ae5f3f"} Jan 30 12:03:26 crc kubenswrapper[4703]: I0130 12:03:26.213850 4703 scope.go:117] "RemoveContainer" containerID="3ad1813ed1716fa3c61d57cb0eb17f0f73685562b8aa6e0a68ceeb80bdb4f3d7" Jan 30 12:03:26 crc kubenswrapper[4703]: I0130 12:03:26.246081 4703 scope.go:117] "RemoveContainer" containerID="de852a391eee3a25e94fda187413572463abd578a30b743a54190e01ba64fb48" Jan 30 12:03:26 crc kubenswrapper[4703]: I0130 12:03:26.259333 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ngspg"] Jan 30 12:03:26 crc kubenswrapper[4703]: I0130 12:03:26.268081 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ngspg"] Jan 30 12:03:26 crc kubenswrapper[4703]: I0130 12:03:26.295634 4703 scope.go:117] "RemoveContainer" containerID="d65257231dc110632856b5d8f3ce3f8e47f9990f70427eac3f5a518841d8931e" Jan 30 12:03:26 crc kubenswrapper[4703]: I0130 12:03:26.332603 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5v2j7"] Jan 30 12:03:26 crc kubenswrapper[4703]: W0130 12:03:26.341348 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0786e743_c8c4_45c5_abbe_197d54d908f6.slice/crio-1aabfdf6281698adfd74979e7c36ed364b6038e4d0d0f567aeab6ba3560edfa5 WatchSource:0}: Error finding container 1aabfdf6281698adfd74979e7c36ed364b6038e4d0d0f567aeab6ba3560edfa5: Status 404 returned error can't find the container with id 1aabfdf6281698adfd74979e7c36ed364b6038e4d0d0f567aeab6ba3560edfa5 Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.095357 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8c48ae5-5f36-4ab5-b6a8-30e51be6d436" path="/var/lib/kubelet/pods/b8c48ae5-5f36-4ab5-b6a8-30e51be6d436/volumes" Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.220422 4703 generic.go:334] "Generic (PLEG): container finished" podID="0786e743-c8c4-45c5-abbe-197d54d908f6" containerID="087754006c300301bebdd854504a0a2b36d2fb318747cf05535504fe3169bcf8" exitCode=0 Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.220539 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5v2j7" event={"ID":"0786e743-c8c4-45c5-abbe-197d54d908f6","Type":"ContainerDied","Data":"087754006c300301bebdd854504a0a2b36d2fb318747cf05535504fe3169bcf8"} Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.220577 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5v2j7" event={"ID":"0786e743-c8c4-45c5-abbe-197d54d908f6","Type":"ContainerStarted","Data":"1aabfdf6281698adfd74979e7c36ed364b6038e4d0d0f567aeab6ba3560edfa5"} Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.776310 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-r7gjb"] Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.779498 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-r7gjb" Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.785835 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.791807 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r7gjb"] Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.807314 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa-utilities\") pod \"community-operators-r7gjb\" (UID: \"1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa\") " pod="openshift-marketplace/community-operators-r7gjb" Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.807380 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vh5gg\" (UniqueName: \"kubernetes.io/projected/1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa-kube-api-access-vh5gg\") pod \"community-operators-r7gjb\" (UID: \"1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa\") " pod="openshift-marketplace/community-operators-r7gjb" Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.807468 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa-catalog-content\") pod \"community-operators-r7gjb\" (UID: \"1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa\") " pod="openshift-marketplace/community-operators-r7gjb" Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.911675 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa-catalog-content\") pod \"community-operators-r7gjb\" (UID: \"1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa\") " pod="openshift-marketplace/community-operators-r7gjb" Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.911850 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa-utilities\") pod \"community-operators-r7gjb\" (UID: \"1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa\") " pod="openshift-marketplace/community-operators-r7gjb" Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.911900 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vh5gg\" (UniqueName: \"kubernetes.io/projected/1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa-kube-api-access-vh5gg\") pod \"community-operators-r7gjb\" (UID: \"1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa\") " pod="openshift-marketplace/community-operators-r7gjb" Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.913257 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa-utilities\") pod \"community-operators-r7gjb\" (UID: \"1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa\") " pod="openshift-marketplace/community-operators-r7gjb" Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.913856 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa-catalog-content\") pod \"community-operators-r7gjb\" (UID: 
\"1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa\") " pod="openshift-marketplace/community-operators-r7gjb" Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.959442 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vh5gg\" (UniqueName: \"kubernetes.io/projected/1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa-kube-api-access-vh5gg\") pod \"community-operators-r7gjb\" (UID: \"1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa\") " pod="openshift-marketplace/community-operators-r7gjb" Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.974067 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-q9b56"] Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.975398 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-q9b56" Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.981837 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 30 12:03:27 crc kubenswrapper[4703]: I0130 12:03:27.984459 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q9b56"] Jan 30 12:03:28 crc kubenswrapper[4703]: I0130 12:03:28.012984 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rz9bx\" (UniqueName: \"kubernetes.io/projected/17ab8a97-9b84-4eac-be90-6d5283440ddb-kube-api-access-rz9bx\") pod \"certified-operators-q9b56\" (UID: \"17ab8a97-9b84-4eac-be90-6d5283440ddb\") " pod="openshift-marketplace/certified-operators-q9b56" Jan 30 12:03:28 crc kubenswrapper[4703]: I0130 12:03:28.013041 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17ab8a97-9b84-4eac-be90-6d5283440ddb-utilities\") pod \"certified-operators-q9b56\" (UID: \"17ab8a97-9b84-4eac-be90-6d5283440ddb\") " pod="openshift-marketplace/certified-operators-q9b56" Jan 30 12:03:28 crc kubenswrapper[4703]: I0130 12:03:28.013106 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17ab8a97-9b84-4eac-be90-6d5283440ddb-catalog-content\") pod \"certified-operators-q9b56\" (UID: \"17ab8a97-9b84-4eac-be90-6d5283440ddb\") " pod="openshift-marketplace/certified-operators-q9b56" Jan 30 12:03:28 crc kubenswrapper[4703]: I0130 12:03:28.113724 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-r7gjb" Jan 30 12:03:28 crc kubenswrapper[4703]: I0130 12:03:28.115434 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rz9bx\" (UniqueName: \"kubernetes.io/projected/17ab8a97-9b84-4eac-be90-6d5283440ddb-kube-api-access-rz9bx\") pod \"certified-operators-q9b56\" (UID: \"17ab8a97-9b84-4eac-be90-6d5283440ddb\") " pod="openshift-marketplace/certified-operators-q9b56" Jan 30 12:03:28 crc kubenswrapper[4703]: I0130 12:03:28.115500 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17ab8a97-9b84-4eac-be90-6d5283440ddb-utilities\") pod \"certified-operators-q9b56\" (UID: \"17ab8a97-9b84-4eac-be90-6d5283440ddb\") " pod="openshift-marketplace/certified-operators-q9b56" Jan 30 12:03:28 crc kubenswrapper[4703]: I0130 12:03:28.115616 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17ab8a97-9b84-4eac-be90-6d5283440ddb-catalog-content\") pod \"certified-operators-q9b56\" (UID: \"17ab8a97-9b84-4eac-be90-6d5283440ddb\") " pod="openshift-marketplace/certified-operators-q9b56" Jan 30 12:03:28 crc kubenswrapper[4703]: I0130 12:03:28.116809 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17ab8a97-9b84-4eac-be90-6d5283440ddb-catalog-content\") pod \"certified-operators-q9b56\" (UID: \"17ab8a97-9b84-4eac-be90-6d5283440ddb\") " pod="openshift-marketplace/certified-operators-q9b56" Jan 30 12:03:28 crc kubenswrapper[4703]: I0130 12:03:28.116800 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17ab8a97-9b84-4eac-be90-6d5283440ddb-utilities\") pod \"certified-operators-q9b56\" (UID: \"17ab8a97-9b84-4eac-be90-6d5283440ddb\") " pod="openshift-marketplace/certified-operators-q9b56" Jan 30 12:03:28 crc kubenswrapper[4703]: I0130 12:03:28.147759 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rz9bx\" (UniqueName: \"kubernetes.io/projected/17ab8a97-9b84-4eac-be90-6d5283440ddb-kube-api-access-rz9bx\") pod \"certified-operators-q9b56\" (UID: \"17ab8a97-9b84-4eac-be90-6d5283440ddb\") " pod="openshift-marketplace/certified-operators-q9b56" Jan 30 12:03:28 crc kubenswrapper[4703]: I0130 12:03:28.297372 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-q9b56" Jan 30 12:03:28 crc kubenswrapper[4703]: W0130 12:03:28.735404 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d70ac4d_de5f_4d0f_84c3_eaf5545c42fa.slice/crio-102406bcad39180dc648fa72efe43a14869776162c5c3947cb5365416b59ccbf WatchSource:0}: Error finding container 102406bcad39180dc648fa72efe43a14869776162c5c3947cb5365416b59ccbf: Status 404 returned error can't find the container with id 102406bcad39180dc648fa72efe43a14869776162c5c3947cb5365416b59ccbf Jan 30 12:03:28 crc kubenswrapper[4703]: I0130 12:03:28.741516 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-r7gjb"] Jan 30 12:03:28 crc kubenswrapper[4703]: I0130 12:03:28.806817 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-q9b56"] Jan 30 12:03:28 crc kubenswrapper[4703]: W0130 12:03:28.824557 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod17ab8a97_9b84_4eac_be90_6d5283440ddb.slice/crio-d44b2174ae42a490fce8d73e06b48ea2f9b52127cda02ac15ced8602c3194e69 WatchSource:0}: Error finding container d44b2174ae42a490fce8d73e06b48ea2f9b52127cda02ac15ced8602c3194e69: Status 404 returned error can't find the container with id d44b2174ae42a490fce8d73e06b48ea2f9b52127cda02ac15ced8602c3194e69 Jan 30 12:03:29 crc kubenswrapper[4703]: I0130 12:03:29.746423 4703 generic.go:334] "Generic (PLEG): container finished" podID="1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa" containerID="6d8ac67b29cf8ceda15e0773cc5a4075bed228300f9497ae9253af9a7096e081" exitCode=0 Jan 30 12:03:29 crc kubenswrapper[4703]: I0130 12:03:29.747543 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7gjb" event={"ID":"1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa","Type":"ContainerDied","Data":"6d8ac67b29cf8ceda15e0773cc5a4075bed228300f9497ae9253af9a7096e081"} Jan 30 12:03:29 crc kubenswrapper[4703]: I0130 12:03:29.747627 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7gjb" event={"ID":"1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa","Type":"ContainerStarted","Data":"102406bcad39180dc648fa72efe43a14869776162c5c3947cb5365416b59ccbf"} Jan 30 12:03:29 crc kubenswrapper[4703]: I0130 12:03:29.752952 4703 generic.go:334] "Generic (PLEG): container finished" podID="17ab8a97-9b84-4eac-be90-6d5283440ddb" containerID="3cfd7177ef568b611010540e6317f22e58d6d6b6c289049a7e946c0361c82a17" exitCode=0 Jan 30 12:03:29 crc kubenswrapper[4703]: I0130 12:03:29.753062 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q9b56" event={"ID":"17ab8a97-9b84-4eac-be90-6d5283440ddb","Type":"ContainerDied","Data":"3cfd7177ef568b611010540e6317f22e58d6d6b6c289049a7e946c0361c82a17"} Jan 30 12:03:29 crc kubenswrapper[4703]: I0130 12:03:29.753157 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q9b56" event={"ID":"17ab8a97-9b84-4eac-be90-6d5283440ddb","Type":"ContainerStarted","Data":"d44b2174ae42a490fce8d73e06b48ea2f9b52127cda02ac15ced8602c3194e69"} Jan 30 12:03:29 crc kubenswrapper[4703]: I0130 12:03:29.777321 4703 generic.go:334] "Generic (PLEG): container finished" podID="0786e743-c8c4-45c5-abbe-197d54d908f6" containerID="a58abbca1085a312b3c4cb7dd89b860a6d2a742f8852ec2cdd36679e8a2ffdd2" 
exitCode=0 Jan 30 12:03:29 crc kubenswrapper[4703]: I0130 12:03:29.777406 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5v2j7" event={"ID":"0786e743-c8c4-45c5-abbe-197d54d908f6","Type":"ContainerDied","Data":"a58abbca1085a312b3c4cb7dd89b860a6d2a742f8852ec2cdd36679e8a2ffdd2"} Jan 30 12:03:30 crc kubenswrapper[4703]: I0130 12:03:30.880247 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7gjb" event={"ID":"1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa","Type":"ContainerStarted","Data":"7bfa374bcbd0b899216d7909b6f950e662128705994953ddd6e1a25d20ed322d"} Jan 30 12:03:30 crc kubenswrapper[4703]: I0130 12:03:30.885435 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q9b56" event={"ID":"17ab8a97-9b84-4eac-be90-6d5283440ddb","Type":"ContainerStarted","Data":"e4c6ac75484c56b9d391c9e2b76c3b963e5c7139af557f02baee80c65930fb81"} Jan 30 12:03:30 crc kubenswrapper[4703]: I0130 12:03:30.888965 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5v2j7" event={"ID":"0786e743-c8c4-45c5-abbe-197d54d908f6","Type":"ContainerStarted","Data":"bb3615391f841bd25bed65dfa31aa926d759311b06d3abb863617aebbe69a542"} Jan 30 12:03:31 crc kubenswrapper[4703]: I0130 12:03:31.169750 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5v2j7" podStartSLOduration=3.188839379 podStartE2EDuration="6.169661714s" podCreationTimestamp="2026-01-30 12:03:25 +0000 UTC" firstStartedPulling="2026-01-30 12:03:27.223692091 +0000 UTC m=+443.001513735" lastFinishedPulling="2026-01-30 12:03:30.204514416 +0000 UTC m=+445.982336070" observedRunningTime="2026-01-30 12:03:30.970744719 +0000 UTC m=+446.748566373" watchObservedRunningTime="2026-01-30 12:03:31.169661714 +0000 UTC m=+446.947483368" Jan 30 12:03:31 crc kubenswrapper[4703]: I0130 12:03:31.175569 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ttcs9"] Jan 30 12:03:31 crc kubenswrapper[4703]: I0130 12:03:31.182271 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ttcs9" Jan 30 12:03:31 crc kubenswrapper[4703]: I0130 12:03:31.188408 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 30 12:03:31 crc kubenswrapper[4703]: I0130 12:03:31.214883 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ttcs9"] Jan 30 12:03:31 crc kubenswrapper[4703]: I0130 12:03:31.376514 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed-utilities\") pod \"redhat-operators-ttcs9\" (UID: \"69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed\") " pod="openshift-marketplace/redhat-operators-ttcs9" Jan 30 12:03:31 crc kubenswrapper[4703]: I0130 12:03:31.376608 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4d8c\" (UniqueName: \"kubernetes.io/projected/69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed-kube-api-access-f4d8c\") pod \"redhat-operators-ttcs9\" (UID: \"69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed\") " pod="openshift-marketplace/redhat-operators-ttcs9" Jan 30 12:03:31 crc kubenswrapper[4703]: I0130 12:03:31.377384 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed-catalog-content\") pod \"redhat-operators-ttcs9\" (UID: \"69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed\") " pod="openshift-marketplace/redhat-operators-ttcs9" Jan 30 12:03:31 crc kubenswrapper[4703]: I0130 12:03:31.478634 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed-catalog-content\") pod \"redhat-operators-ttcs9\" (UID: \"69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed\") " pod="openshift-marketplace/redhat-operators-ttcs9" Jan 30 12:03:31 crc kubenswrapper[4703]: I0130 12:03:31.478782 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed-utilities\") pod \"redhat-operators-ttcs9\" (UID: \"69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed\") " pod="openshift-marketplace/redhat-operators-ttcs9" Jan 30 12:03:31 crc kubenswrapper[4703]: I0130 12:03:31.478844 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4d8c\" (UniqueName: \"kubernetes.io/projected/69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed-kube-api-access-f4d8c\") pod \"redhat-operators-ttcs9\" (UID: \"69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed\") " pod="openshift-marketplace/redhat-operators-ttcs9" Jan 30 12:03:31 crc kubenswrapper[4703]: I0130 12:03:31.479880 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed-catalog-content\") pod \"redhat-operators-ttcs9\" (UID: \"69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed\") " pod="openshift-marketplace/redhat-operators-ttcs9" Jan 30 12:03:31 crc kubenswrapper[4703]: I0130 12:03:31.480227 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed-utilities\") pod \"redhat-operators-ttcs9\" (UID: \"69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed\") " 
pod="openshift-marketplace/redhat-operators-ttcs9" Jan 30 12:03:31 crc kubenswrapper[4703]: I0130 12:03:31.510648 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4d8c\" (UniqueName: \"kubernetes.io/projected/69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed-kube-api-access-f4d8c\") pod \"redhat-operators-ttcs9\" (UID: \"69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed\") " pod="openshift-marketplace/redhat-operators-ttcs9" Jan 30 12:03:31 crc kubenswrapper[4703]: I0130 12:03:31.556106 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ttcs9" Jan 30 12:03:31 crc kubenswrapper[4703]: I0130 12:03:31.904824 4703 generic.go:334] "Generic (PLEG): container finished" podID="17ab8a97-9b84-4eac-be90-6d5283440ddb" containerID="e4c6ac75484c56b9d391c9e2b76c3b963e5c7139af557f02baee80c65930fb81" exitCode=0 Jan 30 12:03:31 crc kubenswrapper[4703]: I0130 12:03:31.904969 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q9b56" event={"ID":"17ab8a97-9b84-4eac-be90-6d5283440ddb","Type":"ContainerDied","Data":"e4c6ac75484c56b9d391c9e2b76c3b963e5c7139af557f02baee80c65930fb81"} Jan 30 12:03:32 crc kubenswrapper[4703]: I0130 12:03:32.148785 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ttcs9"] Jan 30 12:03:32 crc kubenswrapper[4703]: I0130 12:03:32.913548 4703 generic.go:334] "Generic (PLEG): container finished" podID="69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed" containerID="36f3fab1bcb9d46758dfa63f902bb84d7bbe58ea7b4b4bb493f3701f81f9a3c8" exitCode=0 Jan 30 12:03:32 crc kubenswrapper[4703]: I0130 12:03:32.913659 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ttcs9" event={"ID":"69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed","Type":"ContainerDied","Data":"36f3fab1bcb9d46758dfa63f902bb84d7bbe58ea7b4b4bb493f3701f81f9a3c8"} Jan 30 12:03:32 crc kubenswrapper[4703]: I0130 12:03:32.914827 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ttcs9" event={"ID":"69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed","Type":"ContainerStarted","Data":"e39dfb4cd3284395fb2ed2bc0cf1809e9c804dd6bf5c27d91af75d1a809b62cb"} Jan 30 12:03:32 crc kubenswrapper[4703]: I0130 12:03:32.919147 4703 generic.go:334] "Generic (PLEG): container finished" podID="1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa" containerID="7bfa374bcbd0b899216d7909b6f950e662128705994953ddd6e1a25d20ed322d" exitCode=0 Jan 30 12:03:32 crc kubenswrapper[4703]: I0130 12:03:32.919252 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7gjb" event={"ID":"1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa","Type":"ContainerDied","Data":"7bfa374bcbd0b899216d7909b6f950e662128705994953ddd6e1a25d20ed322d"} Jan 30 12:03:32 crc kubenswrapper[4703]: I0130 12:03:32.923444 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-q9b56" event={"ID":"17ab8a97-9b84-4eac-be90-6d5283440ddb","Type":"ContainerStarted","Data":"3cc29f656deaef1956f24f84d10d30ee29acc79a6b3d021bd09957b1231740c0"} Jan 30 12:03:32 crc kubenswrapper[4703]: I0130 12:03:32.973473 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-q9b56" podStartSLOduration=3.421668307 podStartE2EDuration="5.973440072s" podCreationTimestamp="2026-01-30 12:03:27 +0000 UTC" firstStartedPulling="2026-01-30 12:03:29.764262652 +0000 UTC 
m=+445.542084306" lastFinishedPulling="2026-01-30 12:03:32.316034417 +0000 UTC m=+448.093856071" observedRunningTime="2026-01-30 12:03:32.966631125 +0000 UTC m=+448.744452779" watchObservedRunningTime="2026-01-30 12:03:32.973440072 +0000 UTC m=+448.751261726" Jan 30 12:03:33 crc kubenswrapper[4703]: I0130 12:03:33.934426 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-r7gjb" event={"ID":"1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa","Type":"ContainerStarted","Data":"9def82fb03265ae32c82c80bb5894895576b4d0ffb2696733d51f57c49c98c3f"} Jan 30 12:03:33 crc kubenswrapper[4703]: I0130 12:03:33.937427 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ttcs9" event={"ID":"69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed","Type":"ContainerStarted","Data":"90b1a9c218a02047798bb3c8faabdda20e32a30e35ed434a4c424e9d0a7bd6a9"} Jan 30 12:03:33 crc kubenswrapper[4703]: I0130 12:03:33.966849 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-r7gjb" podStartSLOduration=3.306325641 podStartE2EDuration="6.966821684s" podCreationTimestamp="2026-01-30 12:03:27 +0000 UTC" firstStartedPulling="2026-01-30 12:03:29.750349321 +0000 UTC m=+445.528170975" lastFinishedPulling="2026-01-30 12:03:33.410845374 +0000 UTC m=+449.188667018" observedRunningTime="2026-01-30 12:03:33.963029475 +0000 UTC m=+449.740851129" watchObservedRunningTime="2026-01-30 12:03:33.966821684 +0000 UTC m=+449.744643338" Jan 30 12:03:35 crc kubenswrapper[4703]: I0130 12:03:35.886552 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5v2j7" Jan 30 12:03:35 crc kubenswrapper[4703]: I0130 12:03:35.889331 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5v2j7" Jan 30 12:03:35 crc kubenswrapper[4703]: I0130 12:03:35.970355 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5v2j7" Jan 30 12:03:37 crc kubenswrapper[4703]: I0130 12:03:37.027148 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5v2j7" Jan 30 12:03:37 crc kubenswrapper[4703]: I0130 12:03:37.975256 4703 generic.go:334] "Generic (PLEG): container finished" podID="69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed" containerID="90b1a9c218a02047798bb3c8faabdda20e32a30e35ed434a4c424e9d0a7bd6a9" exitCode=0 Jan 30 12:03:37 crc kubenswrapper[4703]: I0130 12:03:37.975373 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ttcs9" event={"ID":"69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed","Type":"ContainerDied","Data":"90b1a9c218a02047798bb3c8faabdda20e32a30e35ed434a4c424e9d0a7bd6a9"} Jan 30 12:03:38 crc kubenswrapper[4703]: I0130 12:03:38.114656 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-r7gjb" Jan 30 12:03:38 crc kubenswrapper[4703]: I0130 12:03:38.114751 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-r7gjb" Jan 30 12:03:38 crc kubenswrapper[4703]: I0130 12:03:38.168547 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-r7gjb" Jan 30 12:03:38 crc kubenswrapper[4703]: I0130 12:03:38.300012 4703 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-q9b56" Jan 30 12:03:38 crc kubenswrapper[4703]: I0130 12:03:38.300147 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-q9b56" Jan 30 12:03:38 crc kubenswrapper[4703]: I0130 12:03:38.501084 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-q9b56" Jan 30 12:03:38 crc kubenswrapper[4703]: I0130 12:03:38.540636 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" podUID="2ab63a92-b99a-4d22-a8b0-a30409cd6ba5" containerName="registry" containerID="cri-o://552ddaca5d3e9e9e211dfe06442acc8fbc47dd28d0a6e1fbbfb219564697658f" gracePeriod=30 Jan 30 12:03:39 crc kubenswrapper[4703]: I0130 12:03:39.139078 4703 generic.go:334] "Generic (PLEG): container finished" podID="2ab63a92-b99a-4d22-a8b0-a30409cd6ba5" containerID="552ddaca5d3e9e9e211dfe06442acc8fbc47dd28d0a6e1fbbfb219564697658f" exitCode=0 Jan 30 12:03:39 crc kubenswrapper[4703]: I0130 12:03:39.139098 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" event={"ID":"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5","Type":"ContainerDied","Data":"552ddaca5d3e9e9e211dfe06442acc8fbc47dd28d0a6e1fbbfb219564697658f"} Jan 30 12:03:39 crc kubenswrapper[4703]: I0130 12:03:39.215042 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-q9b56" Jan 30 12:03:39 crc kubenswrapper[4703]: I0130 12:03:39.217271 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-r7gjb" Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.150390 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" event={"ID":"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5","Type":"ContainerDied","Data":"7824f593c6b33e0ceb239b77b697112c82d4c55fb7d0a97ef13b7eac6eab3230"} Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.150910 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7824f593c6b33e0ceb239b77b697112c82d4c55fb7d0a97ef13b7eac6eab3230" Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.286173 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.436161 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-bound-sa-token\") pod \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.436313 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hmfk2\" (UniqueName: \"kubernetes.io/projected/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-kube-api-access-hmfk2\") pod \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.585908 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.586374 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-registry-certificates\") pod \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.586493 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-registry-tls\") pod \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.586581 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-trusted-ca\") pod \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.586626 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-ca-trust-extracted\") pod \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.586670 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-installation-pull-secrets\") pod \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\" (UID: \"2ab63a92-b99a-4d22-a8b0-a30409cd6ba5\") " Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.594986 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.603366 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.604028 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.608261 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-kube-api-access-hmfk2" (OuterVolumeSpecName: "kube-api-access-hmfk2") pod "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5"). InnerVolumeSpecName "kube-api-access-hmfk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.608328 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.608713 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.643109 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.658270 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5" (UID: "2ab63a92-b99a-4d22-a8b0-a30409cd6ba5"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.687866 4703 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.687916 4703 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.687926 4703 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.687937 4703 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.687951 4703 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.687960 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hmfk2\" (UniqueName: \"kubernetes.io/projected/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-kube-api-access-hmfk2\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:40 crc kubenswrapper[4703]: I0130 12:03:40.687969 4703 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 30 12:03:41 crc kubenswrapper[4703]: I0130 12:03:41.227000 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-2r4b9" Jan 30 12:03:41 crc kubenswrapper[4703]: I0130 12:03:41.227979 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ttcs9" event={"ID":"69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed","Type":"ContainerStarted","Data":"3bc69500f52dc404496e0259dbea8e60f2f3fe91febc70a61a1051d726651aaf"} Jan 30 12:03:41 crc kubenswrapper[4703]: I0130 12:03:41.291371 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ttcs9" podStartSLOduration=4.605337144 podStartE2EDuration="10.291343057s" podCreationTimestamp="2026-01-30 12:03:31 +0000 UTC" firstStartedPulling="2026-01-30 12:03:32.916180511 +0000 UTC m=+448.694002165" lastFinishedPulling="2026-01-30 12:03:38.602186424 +0000 UTC m=+454.380008078" observedRunningTime="2026-01-30 12:03:41.270408241 +0000 UTC m=+457.048229895" watchObservedRunningTime="2026-01-30 12:03:41.291343057 +0000 UTC m=+457.069164711" Jan 30 12:03:41 crc kubenswrapper[4703]: I0130 12:03:41.291847 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2r4b9"] Jan 30 12:03:41 crc kubenswrapper[4703]: I0130 12:03:41.296331 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-2r4b9"] Jan 30 12:03:41 crc kubenswrapper[4703]: I0130 12:03:41.556993 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ttcs9" Jan 30 12:03:41 crc kubenswrapper[4703]: I0130 12:03:41.557075 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ttcs9" Jan 30 12:03:42 crc kubenswrapper[4703]: I0130 12:03:42.645464 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ttcs9" podUID="69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed" containerName="registry-server" probeResult="failure" output=< Jan 30 12:03:42 crc kubenswrapper[4703]: timeout: failed to connect service ":50051" within 1s Jan 30 12:03:42 crc kubenswrapper[4703]: > Jan 30 12:03:42 crc kubenswrapper[4703]: I0130 12:03:42.822767 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:03:42 crc kubenswrapper[4703]: I0130 12:03:42.822873 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:03:42 crc kubenswrapper[4703]: I0130 12:03:42.822946 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 12:03:42 crc kubenswrapper[4703]: I0130 12:03:42.823924 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"24f8d677e145509afc38b556c3f5389f822557c4fa4acc9a9cf095e193cc4c81"} pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" containerMessage="Container machine-config-daemon failed liveness 
probe, will be restarted" Jan 30 12:03:42 crc kubenswrapper[4703]: I0130 12:03:42.823996 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" containerID="cri-o://24f8d677e145509afc38b556c3f5389f822557c4fa4acc9a9cf095e193cc4c81" gracePeriod=600 Jan 30 12:03:43 crc kubenswrapper[4703]: I0130 12:03:43.255526 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ab63a92-b99a-4d22-a8b0-a30409cd6ba5" path="/var/lib/kubelet/pods/2ab63a92-b99a-4d22-a8b0-a30409cd6ba5/volumes" Jan 30 12:03:43 crc kubenswrapper[4703]: I0130 12:03:43.260766 4703 generic.go:334] "Generic (PLEG): container finished" podID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerID="24f8d677e145509afc38b556c3f5389f822557c4fa4acc9a9cf095e193cc4c81" exitCode=0 Jan 30 12:03:43 crc kubenswrapper[4703]: I0130 12:03:43.260848 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerDied","Data":"24f8d677e145509afc38b556c3f5389f822557c4fa4acc9a9cf095e193cc4c81"} Jan 30 12:03:43 crc kubenswrapper[4703]: I0130 12:03:43.260975 4703 scope.go:117] "RemoveContainer" containerID="58de3d8c2be7d8f241344ee25e2ab636d07b3886208be17dd08d628247c779ed" Jan 30 12:03:44 crc kubenswrapper[4703]: I0130 12:03:44.271495 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerStarted","Data":"2897b756e6469910e024f9272f3f823e188d794a43ea99cbf356f2af1315b70e"} Jan 30 12:03:51 crc kubenswrapper[4703]: I0130 12:03:51.609652 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ttcs9" Jan 30 12:03:51 crc kubenswrapper[4703]: I0130 12:03:51.659052 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ttcs9" Jan 30 12:05:05 crc kubenswrapper[4703]: I0130 12:05:05.950765 4703 scope.go:117] "RemoveContainer" containerID="dbeece1e92f2d9f2c556e8d001ffd7952f1584801fd06fe35cd04bb77ef31785" Jan 30 12:06:06 crc kubenswrapper[4703]: I0130 12:06:06.003874 4703 scope.go:117] "RemoveContainer" containerID="552ddaca5d3e9e9e211dfe06442acc8fbc47dd28d0a6e1fbbfb219564697658f" Jan 30 12:06:12 crc kubenswrapper[4703]: I0130 12:06:12.823159 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:06:12 crc kubenswrapper[4703]: I0130 12:06:12.824249 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:06:42 crc kubenswrapper[4703]: I0130 12:06:42.822809 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:06:42 crc kubenswrapper[4703]: I0130 12:06:42.823938 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:07:12 crc kubenswrapper[4703]: I0130 12:07:12.823619 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:07:12 crc kubenswrapper[4703]: I0130 12:07:12.824644 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:07:12 crc kubenswrapper[4703]: I0130 12:07:12.824707 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 12:07:12 crc kubenswrapper[4703]: I0130 12:07:12.825707 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2897b756e6469910e024f9272f3f823e188d794a43ea99cbf356f2af1315b70e"} pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 12:07:12 crc kubenswrapper[4703]: I0130 12:07:12.825795 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" containerID="cri-o://2897b756e6469910e024f9272f3f823e188d794a43ea99cbf356f2af1315b70e" gracePeriod=600 Jan 30 12:07:13 crc kubenswrapper[4703]: I0130 12:07:13.087287 4703 generic.go:334] "Generic (PLEG): container finished" podID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerID="2897b756e6469910e024f9272f3f823e188d794a43ea99cbf356f2af1315b70e" exitCode=0 Jan 30 12:07:13 crc kubenswrapper[4703]: I0130 12:07:13.093095 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerDied","Data":"2897b756e6469910e024f9272f3f823e188d794a43ea99cbf356f2af1315b70e"} Jan 30 12:07:13 crc kubenswrapper[4703]: I0130 12:07:13.093208 4703 scope.go:117] "RemoveContainer" containerID="24f8d677e145509afc38b556c3f5389f822557c4fa4acc9a9cf095e193cc4c81" Jan 30 12:07:14 crc kubenswrapper[4703]: I0130 12:07:14.095656 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerStarted","Data":"55a18e70c04b9c0432452c8dbe489a57bb034e3a138ce8caf3e700f751921742"} Jan 30 12:09:06 crc kubenswrapper[4703]: I0130 12:09:06.068361 4703 scope.go:117] "RemoveContainer" containerID="238e7413fd4b821e3d14d6e874182e4ba58a86aed0fd9879c836d29928f71e99" Jan 30 12:09:13 crc 
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.417772 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-lxbpt"]
Jan 30 12:09:30 crc kubenswrapper[4703]: E0130 12:09:30.419319 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ab63a92-b99a-4d22-a8b0-a30409cd6ba5" containerName="registry"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.419364 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ab63a92-b99a-4d22-a8b0-a30409cd6ba5" containerName="registry"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.419509 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ab63a92-b99a-4d22-a8b0-a30409cd6ba5" containerName="registry"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.420171 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-lxbpt"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.428289 4703 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-6l64q"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.428420 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.432375 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-858654f9db-tfrws"]
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.440433 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.451252 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-tfrws"]
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.462361 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-lxbpt"]
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.487944 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-tfrws"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.493194 4703 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-t7qr2"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.504981 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-fpdbd"]
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.510022 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-fpdbd"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.513213 4703 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-b2hdh"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.516879 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-fpdbd"]
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.590288 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6sr6\" (UniqueName: \"kubernetes.io/projected/bba7ab08-ac7f-40d7-9da9-61da28bf3023-kube-api-access-m6sr6\") pod \"cert-manager-cainjector-cf98fcc89-lxbpt\" (UID: \"bba7ab08-ac7f-40d7-9da9-61da28bf3023\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-lxbpt"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.590623 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsj9r\" (UniqueName: \"kubernetes.io/projected/86682bcd-a9b5-485b-b91d-81b53fa2536e-kube-api-access-wsj9r\") pod \"cert-manager-858654f9db-tfrws\" (UID: \"86682bcd-a9b5-485b-b91d-81b53fa2536e\") " pod="cert-manager/cert-manager-858654f9db-tfrws"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.590702 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7489v\" (UniqueName: \"kubernetes.io/projected/60c1cbf2-1a9e-481a-b264-5f3cd8536c08-kube-api-access-7489v\") pod \"cert-manager-webhook-687f57d79b-fpdbd\" (UID: \"60c1cbf2-1a9e-481a-b264-5f3cd8536c08\") " pod="cert-manager/cert-manager-webhook-687f57d79b-fpdbd"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.691458 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsj9r\" (UniqueName: \"kubernetes.io/projected/86682bcd-a9b5-485b-b91d-81b53fa2536e-kube-api-access-wsj9r\") pod \"cert-manager-858654f9db-tfrws\" (UID: \"86682bcd-a9b5-485b-b91d-81b53fa2536e\") " pod="cert-manager/cert-manager-858654f9db-tfrws"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.691564 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7489v\" (UniqueName: \"kubernetes.io/projected/60c1cbf2-1a9e-481a-b264-5f3cd8536c08-kube-api-access-7489v\") pod \"cert-manager-webhook-687f57d79b-fpdbd\" (UID: \"60c1cbf2-1a9e-481a-b264-5f3cd8536c08\") " pod="cert-manager/cert-manager-webhook-687f57d79b-fpdbd"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.691636 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6sr6\" (UniqueName: \"kubernetes.io/projected/bba7ab08-ac7f-40d7-9da9-61da28bf3023-kube-api-access-m6sr6\") pod \"cert-manager-cainjector-cf98fcc89-lxbpt\" (UID: \"bba7ab08-ac7f-40d7-9da9-61da28bf3023\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-lxbpt"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.717346 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6sr6\" (UniqueName: \"kubernetes.io/projected/bba7ab08-ac7f-40d7-9da9-61da28bf3023-kube-api-access-m6sr6\") pod \"cert-manager-cainjector-cf98fcc89-lxbpt\" (UID: \"bba7ab08-ac7f-40d7-9da9-61da28bf3023\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-lxbpt"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.718585 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsj9r\" (UniqueName: \"kubernetes.io/projected/86682bcd-a9b5-485b-b91d-81b53fa2536e-kube-api-access-wsj9r\") pod \"cert-manager-858654f9db-tfrws\" (UID: \"86682bcd-a9b5-485b-b91d-81b53fa2536e\") " pod="cert-manager/cert-manager-858654f9db-tfrws"
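The reconciler entries above follow the volume manager's fixed per-volume progression for each kube-api-access-* projected token: VerifyControllerAttachedVolume (reconciler_common.go:245), then MountVolume (reconciler_common.go:218), then "MountVolume.SetUp succeeded" (operation_generator.go:637). A schematic of that progression as observable from the log ordering, not the kubelet's actual implementation:

package main

import "fmt"

type volumeState int

const (
	unprocessed    volumeState = iota
	attachVerified             // after "VerifyControllerAttachedVolume started"
	mounted                    // after "MountVolume.SetUp succeeded"
)

// reconcile advances each desired volume one step per pass, mirroring the
// two log passes visible above (verify at .590xxx, mount at .691xxx/.71xxxx).
func reconcile(volumes []string, state map[string]volumeState) {
	for _, v := range volumes {
		switch state[v] {
		case unprocessed:
			fmt.Println("VerifyControllerAttachedVolume started for", v)
			state[v] = attachVerified
		case attachVerified:
			fmt.Println("MountVolume started / SetUp succeeded for", v)
			state[v] = mounted
		}
	}
}

func main() {
	vols := []string{"kube-api-access-m6sr6", "kube-api-access-wsj9r", "kube-api-access-7489v"}
	state := map[string]volumeState{}
	reconcile(vols, state) // first pass: verify attach
	reconcile(vols, state) // second pass: mount
}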
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.718778 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7489v\" (UniqueName: \"kubernetes.io/projected/60c1cbf2-1a9e-481a-b264-5f3cd8536c08-kube-api-access-7489v\") pod \"cert-manager-webhook-687f57d79b-fpdbd\" (UID: \"60c1cbf2-1a9e-481a-b264-5f3cd8536c08\") " pod="cert-manager/cert-manager-webhook-687f57d79b-fpdbd"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.822701 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-lxbpt"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.839806 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-tfrws"
Jan 30 12:09:30 crc kubenswrapper[4703]: I0130 12:09:30.848723 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-fpdbd"
Jan 30 12:09:31 crc kubenswrapper[4703]: I0130 12:09:31.099757 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-tfrws"]
Jan 30 12:09:31 crc kubenswrapper[4703]: W0130 12:09:31.118399 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod86682bcd_a9b5_485b_b91d_81b53fa2536e.slice/crio-9a44e7430a8163deb27d1aab0850827e3689e8f686261fcb8cde35aa1bfa12d9 WatchSource:0}: Error finding container 9a44e7430a8163deb27d1aab0850827e3689e8f686261fcb8cde35aa1bfa12d9: Status 404 returned error can't find the container with id 9a44e7430a8163deb27d1aab0850827e3689e8f686261fcb8cde35aa1bfa12d9
Jan 30 12:09:31 crc kubenswrapper[4703]: I0130 12:09:31.122397 4703 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 30 12:09:31 crc kubenswrapper[4703]: I0130 12:09:31.150152 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-lxbpt"]
Jan 30 12:09:31 crc kubenswrapper[4703]: W0130 12:09:31.154631 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbba7ab08_ac7f_40d7_9da9_61da28bf3023.slice/crio-ce46943709f4a8966108f389f3f3ebbc252400153ad57f30293554093af6431e WatchSource:0}: Error finding container ce46943709f4a8966108f389f3f3ebbc252400153ad57f30293554093af6431e: Status 404 returned error can't find the container with id ce46943709f4a8966108f389f3f3ebbc252400153ad57f30293554093af6431e
Jan 30 12:09:31 crc kubenswrapper[4703]: I0130 12:09:31.401452 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-fpdbd"]
Jan 30 12:09:31 crc kubenswrapper[4703]: W0130 12:09:31.406543 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod60c1cbf2_1a9e_481a_b264_5f3cd8536c08.slice/crio-a992bcb0492feba7a964c7f6e530d13ecfa03c6e5c218ab8e680c94c88ad0cb2 WatchSource:0}: Error finding container a992bcb0492feba7a964c7f6e530d13ecfa03c6e5c218ab8e680c94c88ad0cb2: Status 404 returned error can't find the container with id a992bcb0492feba7a964c7f6e530d13ecfa03c6e5c218ab8e680c94c88ad0cb2
Jan 30 12:09:32 crc kubenswrapper[4703]: I0130 12:09:32.117361 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-tfrws" event={"ID":"86682bcd-a9b5-485b-b91d-81b53fa2536e","Type":"ContainerStarted","Data":"9a44e7430a8163deb27d1aab0850827e3689e8f686261fcb8cde35aa1bfa12d9"}
Jan 30 12:09:32 crc kubenswrapper[4703]: I0130 12:09:32.120391 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-fpdbd" event={"ID":"60c1cbf2-1a9e-481a-b264-5f3cd8536c08","Type":"ContainerStarted","Data":"a992bcb0492feba7a964c7f6e530d13ecfa03c6e5c218ab8e680c94c88ad0cb2"}
Jan 30 12:09:32 crc kubenswrapper[4703]: I0130 12:09:32.121777 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-lxbpt" event={"ID":"bba7ab08-ac7f-40d7-9da9-61da28bf3023","Type":"ContainerStarted","Data":"ce46943709f4a8966108f389f3f3ebbc252400153ad57f30293554093af6431e"}
Jan 30 12:09:37 crc kubenswrapper[4703]: I0130 12:09:37.168578 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-lxbpt" event={"ID":"bba7ab08-ac7f-40d7-9da9-61da28bf3023","Type":"ContainerStarted","Data":"1ea2308d311588f7fd43796961d04641fe62e13a3595c58dd80bb6fef87f0616"}
Jan 30 12:09:37 crc kubenswrapper[4703]: I0130 12:09:37.170553 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-tfrws" event={"ID":"86682bcd-a9b5-485b-b91d-81b53fa2536e","Type":"ContainerStarted","Data":"16e824efeffdee40b03ba9f923591d9cc2da5f9196a15e5dc0b08c1df44965dc"}
Jan 30 12:09:37 crc kubenswrapper[4703]: I0130 12:09:37.173074 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-fpdbd" event={"ID":"60c1cbf2-1a9e-481a-b264-5f3cd8536c08","Type":"ContainerStarted","Data":"473ea169d1ab82ac89c042cdc7c38530cd80681a9d36454891f18c8564ba213b"}
Jan 30 12:09:37 crc kubenswrapper[4703]: I0130 12:09:37.173295 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-687f57d79b-fpdbd"
Jan 30 12:09:37 crc kubenswrapper[4703]: I0130 12:09:37.189102 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-cf98fcc89-lxbpt" podStartSLOduration=2.460444335 podStartE2EDuration="7.189064963s" podCreationTimestamp="2026-01-30 12:09:30 +0000 UTC" firstStartedPulling="2026-01-30 12:09:31.157701934 +0000 UTC m=+806.935523588" lastFinishedPulling="2026-01-30 12:09:35.886322562 +0000 UTC m=+811.664144216" observedRunningTime="2026-01-30 12:09:37.186746841 +0000 UTC m=+812.964568495" watchObservedRunningTime="2026-01-30 12:09:37.189064963 +0000 UTC m=+812.966886617"
Jan 30 12:09:37 crc kubenswrapper[4703]: I0130 12:09:37.233440 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-687f57d79b-fpdbd" podStartSLOduration=2.721150067 podStartE2EDuration="7.233408934s" podCreationTimestamp="2026-01-30 12:09:30 +0000 UTC" firstStartedPulling="2026-01-30 12:09:31.409491536 +0000 UTC m=+807.187313190" lastFinishedPulling="2026-01-30 12:09:35.921750403 +0000 UTC m=+811.699572057" observedRunningTime="2026-01-30 12:09:37.211924637 +0000 UTC m=+812.989746301" watchObservedRunningTime="2026-01-30 12:09:37.233408934 +0000 UTC m=+813.011230588"
Jan 30 12:09:37 crc kubenswrapper[4703]: I0130 12:09:37.235604 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-858654f9db-tfrws" podStartSLOduration=2.438169566 podStartE2EDuration="7.235594932s" podCreationTimestamp="2026-01-30 12:09:30 +0000 UTC" firstStartedPulling="2026-01-30 12:09:31.122094787 +0000 UTC m=+806.899916441" lastFinishedPulling="2026-01-30 12:09:35.919520153 +0000 UTC m=+811.697341807" observedRunningTime="2026-01-30 12:09:37.230533037 +0000 UTC m=+813.008354711" watchObservedRunningTime="2026-01-30 12:09:37.235594932 +0000 UTC m=+813.013416586"
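The pod_startup_latency_tracker entries report two durations per pod; subtracting the image-pull window (lastFinishedPulling minus firstStartedPulling) from podStartE2EDuration reproduces podStartSLOduration exactly, for all three pods. Checking the cert-manager-webhook numbers above using the monotonic m=+ offsets:

package main

import "fmt"

func main() {
	// m=+ offsets from the cert-manager-webhook-687f57d79b-fpdbd entry above.
	firstStartedPulling := 807.187313190
	lastFinishedPulling := 811.699572057
	e2e := 7.233408934 // podStartE2EDuration

	pull := lastFinishedPulling - firstStartedPulling
	fmt.Printf("pull window:  %.9fs\n", pull)     // ~4.512258867s
	fmt.Printf("SLO duration: %.9fs\n", e2e-pull) // ~2.721150067s, matching podStartSLOduration
}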
Jan 30 12:09:39 crc kubenswrapper[4703]: I0130 12:09:39.169611 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-n7wnf"]
Jan 30 12:09:39 crc kubenswrapper[4703]: I0130 12:09:39.170738 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovn-controller" containerID="cri-o://b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c" gracePeriod=30
Jan 30 12:09:39 crc kubenswrapper[4703]: I0130 12:09:39.171341 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="sbdb" containerID="cri-o://cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8" gracePeriod=30
Jan 30 12:09:39 crc kubenswrapper[4703]: I0130 12:09:39.171402 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="nbdb" containerID="cri-o://2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0" gracePeriod=30
Jan 30 12:09:39 crc kubenswrapper[4703]: I0130 12:09:39.171455 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="northd" containerID="cri-o://9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4" gracePeriod=30
Jan 30 12:09:39 crc kubenswrapper[4703]: I0130 12:09:39.171500 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37" gracePeriod=30
Jan 30 12:09:39 crc kubenswrapper[4703]: I0130 12:09:39.171539 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="kube-rbac-proxy-node" containerID="cri-o://deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2" gracePeriod=30
Jan 30 12:09:39 crc kubenswrapper[4703]: I0130 12:09:39.171579 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovn-acl-logging" containerID="cri-o://e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e" gracePeriod=30
Jan 30 12:09:39 crc kubenswrapper[4703]: I0130 12:09:39.367758 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovnkube-controller" containerID="cri-o://4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21" gracePeriod=30
Jan 30 12:09:39 crc kubenswrapper[4703]: E0130 12:09:39.468944 4703 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod874a5df5_f6bd_4111_aefa_f43e43e1fcc0.slice/crio-7fbcc2e52627ea8ee7eb2c47a9d32d96b4388fac90edfd7cf994bb9dcfa14b6b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06ffa267_20b9_4132_9f87_1218b111ebbc.slice/crio-e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod874a5df5_f6bd_4111_aefa_f43e43e1fcc0.slice/crio-conmon-7fbcc2e52627ea8ee7eb2c47a9d32d96b4388fac90edfd7cf994bb9dcfa14b6b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06ffa267_20b9_4132_9f87_1218b111ebbc.slice/crio-d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06ffa267_20b9_4132_9f87_1218b111ebbc.slice/crio-b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod06ffa267_20b9_4132_9f87_1218b111ebbc.slice/crio-conmon-deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2.scope\": RecentStats: unable to find data in memory cache]"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.082639 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovnkube-controller/3.log"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.086944 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovn-acl-logging/0.log"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.087723 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovn-controller/0.log"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.088317 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.153000 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-d82gf"]
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.156191 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="northd"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.156456 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="northd"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.156544 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovnkube-controller"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.156600 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovnkube-controller"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.156654 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovn-controller"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.156707 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovn-controller"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.156959 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovn-acl-logging"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.157023 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovn-acl-logging"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.157081 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="kubecfg-setup"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.157165 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="kubecfg-setup"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.157269 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovnkube-controller"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.157340 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovnkube-controller"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.157400 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovnkube-controller"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.157454 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovnkube-controller"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.157504 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="nbdb"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.157557 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="nbdb"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.157607 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="sbdb"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.157656 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="sbdb"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.157722 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovnkube-controller"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.157772 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovnkube-controller"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.157821 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="kube-rbac-proxy-ovn-metrics"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.157872 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="kube-rbac-proxy-ovn-metrics"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.157928 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="kube-rbac-proxy-node"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.157977 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="kube-rbac-proxy-node"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.158242 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="nbdb"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.158319 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="kube-rbac-proxy-node"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.158387 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="sbdb"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.158449 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovn-acl-logging"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.158520 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovnkube-controller"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.158591 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovn-controller"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.158662 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovnkube-controller"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.158737 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovnkube-controller"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.158800 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="northd"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.158861 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovnkube-controller"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.158928 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="kube-rbac-proxy-ovn-metrics"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.159141 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovnkube-controller"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.159233 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovnkube-controller"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.159413 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerName="ovnkube-controller"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.161370 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.190374 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-slash\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") "
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.190425 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-run-systemd\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") "
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.190548 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-slash" (OuterVolumeSpecName: "host-slash") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue ""
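Before admitting the replacement pod ovnkube-node-d82gf, the CPU and memory managers sweep every assignment recorded under the old pod UID; the repeated ovnkube-controller entries reflect several recorded instances of that container (it had restarted, cf. the /3.log suffix earlier). A sketch of the sweep, under the assumption that state is keyed by (podUID, containerName):

package main

import "fmt"

// removeStaleState sketches the sweep logged by cpu_manager.go:410 /
// state_mem.go:107 / memory_manager.go:354: drop every resource assignment
// whose pod UID is no longer in the active set.
func removeStaleState(active map[string]bool, assignments map[string][]string) {
	for podUID, containers := range assignments {
		if active[podUID] {
			continue
		}
		for _, c := range containers {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", podUID, c)
		}
		delete(assignments, podUID)
	}
}

func main() {
	active := map[string]bool{"5f09de14-9762-4f64-900c-df6bbfea6b78": true}
	assignments := map[string][]string{
		"06ffa267-20b9-4132-9f87-1218b111ebbc": {"northd", "nbdb", "sbdb", "ovnkube-controller"},
	}
	removeStaleState(active, assignments)
}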
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191084 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/06ffa267-20b9-4132-9f87-1218b111ebbc-env-overrides\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191146 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-run-ovn-kubernetes\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191174 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-run-openvswitch\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191219 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbx97\" (UniqueName: \"kubernetes.io/projected/06ffa267-20b9-4132-9f87-1218b111ebbc-kube-api-access-dbx97\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191247 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/06ffa267-20b9-4132-9f87-1218b111ebbc-ovnkube-script-lib\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191271 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-node-log\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191294 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-cni-netd\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191320 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-cni-bin\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191340 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-systemd-units\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191372 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-run-netns\") pod 
\"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191395 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-var-lib-openvswitch\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191443 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/06ffa267-20b9-4132-9f87-1218b111ebbc-ovnkube-config\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191468 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-kubelet\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191510 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/06ffa267-20b9-4132-9f87-1218b111ebbc-ovn-node-metrics-cert\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191527 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-run-ovn\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191556 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-etc-openvswitch\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191576 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-log-socket\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191596 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-var-lib-cni-networks-ovn-kubernetes\") pod \"06ffa267-20b9-4132-9f87-1218b111ebbc\" (UID: \"06ffa267-20b9-4132-9f87-1218b111ebbc\") " Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191668 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5f09de14-9762-4f64-900c-df6bbfea6b78-ovn-node-metrics-cert\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191695 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-run-ovn\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191756 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-cni-netd\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191783 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-systemd-units\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191798 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-run-systemd\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191813 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-run-openvswitch\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191838 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-var-lib-openvswitch\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191859 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-cni-bin\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191889 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-log-socket\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191916 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191941 
4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5f09de14-9762-4f64-900c-df6bbfea6b78-ovnkube-script-lib\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191970 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5f09de14-9762-4f64-900c-df6bbfea6b78-env-overrides\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.191998 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5p9mm\" (UniqueName: \"kubernetes.io/projected/5f09de14-9762-4f64-900c-df6bbfea6b78-kube-api-access-5p9mm\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.192026 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-run-netns\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.192047 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-slash\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.192073 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-etc-openvswitch\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.192091 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-kubelet\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.192109 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-run-ovn-kubernetes\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.192145 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-node-log\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " 
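Nearly every volume the replacement pod attaches above is a kubernetes.io/host-path volume: d82gf binds the same node directories (run-openvswitch, etc-openvswitch, node-log, and so on) that the n7wnf teardown below releases, which is why OVN databases and sockets survive the pod swap. A representative definition; the actual ovnkube-node DaemonSet spec is not in this log, and the host path shown is an assumption:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Assumed path: typical for a volume named "run-openvswitch"; the real
	// DaemonSet may differ.
	vol := corev1.Volume{
		Name: "run-openvswitch",
		VolumeSource: corev1.VolumeSource{
			HostPath: &corev1.HostPathVolumeSource{Path: "/run/openvswitch"},
		},
	}
	fmt.Printf("%+v\n", vol)
}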
pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.192183 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5f09de14-9762-4f64-900c-df6bbfea6b78-ovnkube-config\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.192225 4703 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-slash\") on node \"crc\" DevicePath \"\"" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.192295 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.192320 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.193517 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06ffa267-20b9-4132-9f87-1218b111ebbc-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.193910 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06ffa267-20b9-4132-9f87-1218b111ebbc-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.193967 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-node-log" (OuterVolumeSpecName: "node-log") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.194007 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "host-cni-netd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.194030 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.194051 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.194080 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.194105 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.194629 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06ffa267-20b9-4132-9f87-1218b111ebbc-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.194667 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.194693 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.198263 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-log-socket" (OuterVolumeSpecName: "log-socket") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "log-socket". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.198352 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.198665 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.199135 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06ffa267-20b9-4132-9f87-1218b111ebbc-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.200110 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06ffa267-20b9-4132-9f87-1218b111ebbc-kube-api-access-dbx97" (OuterVolumeSpecName: "kube-api-access-dbx97") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "kube-api-access-dbx97". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.211593 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "06ffa267-20b9-4132-9f87-1218b111ebbc" (UID: "06ffa267-20b9-4132-9f87-1218b111ebbc"). InnerVolumeSpecName "run-systemd". 
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.287732 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovnkube-controller/3.log"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.294985 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovn-acl-logging/0.log"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295057 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-run-ovn\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295136 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-cni-netd\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295171 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-systemd-units\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295199 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-run-systemd\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295242 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-run-openvswitch\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295263 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-run-ovn\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295263 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-cni-netd\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295313 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-run-systemd\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295274 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-var-lib-openvswitch\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295385 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-var-lib-openvswitch\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295623 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-n7wnf_06ffa267-20b9-4132-9f87-1218b111ebbc/ovn-controller/0.log"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295722 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-run-openvswitch\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295763 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-cni-bin\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295802 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-cni-bin\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295817 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-log-socket\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295850 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-log-socket\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295898 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295941 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5f09de14-9762-4f64-900c-df6bbfea6b78-ovnkube-script-lib\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
\"kubernetes.io/configmap/5f09de14-9762-4f64-900c-df6bbfea6b78-ovnkube-script-lib\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295960 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295985 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5f09de14-9762-4f64-900c-df6bbfea6b78-env-overrides\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296035 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5p9mm\" (UniqueName: \"kubernetes.io/projected/5f09de14-9762-4f64-900c-df6bbfea6b78-kube-api-access-5p9mm\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296036 4703 generic.go:334] "Generic (PLEG): container finished" podID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerID="4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21" exitCode=0 Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296078 4703 generic.go:334] "Generic (PLEG): container finished" podID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerID="cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8" exitCode=0 Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296087 4703 generic.go:334] "Generic (PLEG): container finished" podID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerID="2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0" exitCode=0 Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296091 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-run-netns\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296141 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-slash\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.295753 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-systemd-units\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296097 4703 generic.go:334] "Generic (PLEG): container finished" podID="06ffa267-20b9-4132-9f87-1218b111ebbc" 
containerID="9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4" exitCode=0 Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296289 4703 generic.go:334] "Generic (PLEG): container finished" podID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerID="d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37" exitCode=0 Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296314 4703 generic.go:334] "Generic (PLEG): container finished" podID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerID="deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2" exitCode=0 Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296330 4703 generic.go:334] "Generic (PLEG): container finished" podID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerID="e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e" exitCode=143 Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296343 4703 generic.go:334] "Generic (PLEG): container finished" podID="06ffa267-20b9-4132-9f87-1218b111ebbc" containerID="b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c" exitCode=143 Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296355 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296297 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-run-netns\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296114 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerDied","Data":"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296179 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-etc-openvswitch\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296360 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-slash\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296399 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-etc-openvswitch\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296838 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerDied","Data":"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296876 4703 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerDied","Data":"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296896 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerDied","Data":"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296910 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerDied","Data":"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296926 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerDied","Data":"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296941 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296957 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296964 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296971 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296978 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296986 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296992 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.296999 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297006 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297016 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerDied","Data":"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297030 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297037 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297044 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297052 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297059 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297080 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297092 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297099 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297111 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297112 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-run-ovn-kubernetes\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297134 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297154 4703 scope.go:117] "RemoveContainer" containerID="4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297172 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" 
event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerDied","Data":"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297235 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297253 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297261 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297268 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297276 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297284 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297291 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297300 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297310 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297318 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297050 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-run-ovn-kubernetes\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297174 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5f09de14-9762-4f64-900c-df6bbfea6b78-env-overrides\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297314 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5f09de14-9762-4f64-900c-df6bbfea6b78-ovnkube-script-lib\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297329 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-n7wnf" event={"ID":"06ffa267-20b9-4132-9f87-1218b111ebbc","Type":"ContainerDied","Data":"08182a420baba462c4c12a1921e38e6566f5f755c5a8757e5b5f436b449ffa38"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297381 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297392 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297401 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297409 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297417 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297426 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297433 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297438 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-kubelet\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297441 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297483 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297511 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835"} Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297415 4703 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-host-kubelet\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297552 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-node-log\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297596 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5f09de14-9762-4f64-900c-df6bbfea6b78-ovnkube-config\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297615 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5f09de14-9762-4f64-900c-df6bbfea6b78-ovn-node-metrics-cert\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297666 4703 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297678 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbx97\" (UniqueName: \"kubernetes.io/projected/06ffa267-20b9-4132-9f87-1218b111ebbc-kube-api-access-dbx97\") on node \"crc\" DevicePath \"\"" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297694 4703 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/06ffa267-20b9-4132-9f87-1218b111ebbc-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297705 4703 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-node-log\") on node \"crc\" DevicePath \"\"" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297714 4703 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297722 4703 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297730 4703 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297739 4703 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-run-netns\") on node \"crc\" DevicePath \"\"" 
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297759 4703 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-var-lib-openvswitch\") on node \"crc\" DevicePath \"\""
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297777 4703 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/06ffa267-20b9-4132-9f87-1218b111ebbc-ovnkube-config\") on node \"crc\" DevicePath \"\""
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297798 4703 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-kubelet\") on node \"crc\" DevicePath \"\""
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297810 4703 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/06ffa267-20b9-4132-9f87-1218b111ebbc-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297824 4703 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-run-ovn\") on node \"crc\" DevicePath \"\""
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297838 4703 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-etc-openvswitch\") on node \"crc\" DevicePath \"\""
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297853 4703 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-log-socket\") on node \"crc\" DevicePath \"\""
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297867 4703 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297881 4703 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-run-systemd\") on node \"crc\" DevicePath \"\""
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297903 4703 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/06ffa267-20b9-4132-9f87-1218b111ebbc-env-overrides\") on node \"crc\" DevicePath \"\""
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297917 4703 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/06ffa267-20b9-4132-9f87-1218b111ebbc-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.297617 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5f09de14-9762-4f64-900c-df6bbfea6b78-node-log\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.298563 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5f09de14-9762-4f64-900c-df6bbfea6b78-ovnkube-config\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.299546 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-72zlj_874a5df5-f6bd-4111-aefa-f43e43e1fcc0/kube-multus/2.log"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.300981 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-72zlj_874a5df5-f6bd-4111-aefa-f43e43e1fcc0/kube-multus/1.log"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.301009 4703 generic.go:334] "Generic (PLEG): container finished" podID="874a5df5-f6bd-4111-aefa-f43e43e1fcc0" containerID="7fbcc2e52627ea8ee7eb2c47a9d32d96b4388fac90edfd7cf994bb9dcfa14b6b" exitCode=2
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.301036 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-72zlj" event={"ID":"874a5df5-f6bd-4111-aefa-f43e43e1fcc0","Type":"ContainerDied","Data":"7fbcc2e52627ea8ee7eb2c47a9d32d96b4388fac90edfd7cf994bb9dcfa14b6b"}
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.301055 4703 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"af38bdb6fc351ea256fbb5a368b87ad70202821a6a348e230d540c60694cc014"}
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.301636 4703 scope.go:117] "RemoveContainer" containerID="7fbcc2e52627ea8ee7eb2c47a9d32d96b4388fac90edfd7cf994bb9dcfa14b6b"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.301970 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5f09de14-9762-4f64-900c-df6bbfea6b78-ovn-node-metrics-cert\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.319808 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5p9mm\" (UniqueName: \"kubernetes.io/projected/5f09de14-9762-4f64-900c-df6bbfea6b78-kube-api-access-5p9mm\") pod \"ovnkube-node-d82gf\" (UID: \"5f09de14-9762-4f64-900c-df6bbfea6b78\") " pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.331957 4703 scope.go:117] "RemoveContainer" containerID="ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.361700 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-n7wnf"]
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.365006 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-n7wnf"]
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.366444 4703 scope.go:117] "RemoveContainer" containerID="cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.390940 4703 scope.go:117] "RemoveContainer" containerID="2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.417100 4703 scope.go:117] "RemoveContainer" containerID="9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.434336 4703 scope.go:117] "RemoveContainer" containerID="d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.450999 4703 scope.go:117] "RemoveContainer" containerID="deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.467486 4703 scope.go:117] "RemoveContainer" containerID="e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.483083 4703 scope.go:117] "RemoveContainer" containerID="b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.486060 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.509599 4703 scope.go:117] "RemoveContainer" containerID="e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835"
Jan 30 12:09:40 crc kubenswrapper[4703]: W0130 12:09:40.513893 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f09de14_9762_4f64_900c_df6bbfea6b78.slice/crio-ebec15f2117a3972407d5c97d04524eaf8dca3710b6cc228aa24d6918533949b WatchSource:0}: Error finding container ebec15f2117a3972407d5c97d04524eaf8dca3710b6cc228aa24d6918533949b: Status 404 returned error can't find the container with id ebec15f2117a3972407d5c97d04524eaf8dca3710b6cc228aa24d6918533949b
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.567908 4703 scope.go:117] "RemoveContainer" containerID="4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.568743 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21\": container with ID starting with 4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21 not found: ID does not exist" containerID="4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.568865 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21"} err="failed to get container status \"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21\": rpc error: code = NotFound desc = could not find container \"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21\": container with ID starting with 4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.568934 4703 scope.go:117] "RemoveContainer" containerID="ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.570603 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f\": container with ID starting with ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f not found: ID does not exist" containerID="ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.570805 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"} err="failed to get container status \"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f\": rpc error: code = NotFound desc = could not find container \"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f\": container with ID starting with ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.570855 4703 scope.go:117] "RemoveContainer" containerID="cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.571375 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\": container with ID starting with cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8 not found: ID does not exist" containerID="cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.571398 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8"} err="failed to get container status \"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\": rpc error: code = NotFound desc = could not find container \"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\": container with ID starting with cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.571452 4703 scope.go:117] "RemoveContainer" containerID="2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.572266 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\": container with ID starting with 2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0 not found: ID does not exist" containerID="2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.572289 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0"} err="failed to get container status \"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\": rpc error: code = NotFound desc = could not find container \"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\": container with ID starting with 2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.572308 4703 scope.go:117] "RemoveContainer" containerID="9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.572579 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\": container with ID starting with 9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4 not found: ID does not exist" containerID="9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.572609 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4"} err="failed to get container status \"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\": rpc error: code = NotFound desc = could not find container \"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\": container with ID starting with 9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.572632 4703 scope.go:117] "RemoveContainer" containerID="d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.572901 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\": container with ID starting with d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37 not found: ID does not exist" containerID="d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.572925 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37"} err="failed to get container status \"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\": rpc error: code = NotFound desc = could not find container \"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\": container with ID starting with d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.572944 4703 scope.go:117] "RemoveContainer" containerID="deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.573231 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\": container with ID starting with deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2 not found: ID does not exist" containerID="deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.573295 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2"} err="failed to get container status \"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\": rpc error: code = NotFound desc = could not find container \"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\": container with ID starting with deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.573312 4703 scope.go:117] "RemoveContainer" containerID="e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.573534 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\": container with ID starting with e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e not found: ID does not exist" containerID="e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.573563 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e"} err="failed to get container status \"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\": rpc error: code = NotFound desc = could not find container \"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\": container with ID starting with e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.573583 4703 scope.go:117] "RemoveContainer" containerID="b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.573852 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\": container with ID starting with b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c not found: ID does not exist" containerID="b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.573893 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c"} err="failed to get container status \"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\": rpc error: code = NotFound desc = could not find container \"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\": container with ID starting with b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.573912 4703 scope.go:117] "RemoveContainer" containerID="e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835"
Jan 30 12:09:40 crc kubenswrapper[4703]: E0130 12:09:40.574179 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\": container with ID starting with e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835 not found: ID does not exist" containerID="e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.574203 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835"} err="failed to get container status \"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\": rpc error: code = NotFound desc = could not find container \"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\": container with ID starting with e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.574219 4703 scope.go:117] "RemoveContainer" containerID="4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.574537 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21"} err="failed to get container status \"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21\": rpc error: code = NotFound desc = could not find container \"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21\": container with ID starting with 4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.574570 4703 scope.go:117] "RemoveContainer" containerID="ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.574842 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"} err="failed to get container status \"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f\": rpc error: code = NotFound desc = could not find container \"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f\": container with ID starting with ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.574857 4703 scope.go:117] "RemoveContainer" containerID="cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.579179 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8"} err="failed to get container status \"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\": rpc error: code = NotFound desc = could not find container \"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\": container with ID starting with cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.579238 4703 scope.go:117] "RemoveContainer" containerID="2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.579608 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0"} err="failed to get container status \"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\": rpc error: code = NotFound desc = could not find container \"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\": container with ID starting with 2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.579626 4703 scope.go:117] "RemoveContainer" containerID="9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.579844 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4"} err="failed to get container status \"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\": rpc error: code = NotFound desc = could not find container \"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\": container with ID starting with 9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.579871 4703 scope.go:117] "RemoveContainer" containerID="d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.580234 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37"} err="failed to get container status \"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\": rpc error: code = NotFound desc = could not find container \"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\": container with ID starting with d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.580271 4703 scope.go:117] "RemoveContainer" containerID="deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.580551 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2"} err="failed to get container status \"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\": rpc error: code = NotFound desc = could not find container \"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\": container with ID starting with deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.580580 4703 scope.go:117] "RemoveContainer" containerID="e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.580854 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e"} err="failed to get container status \"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\": rpc error: code = NotFound desc = could not find container \"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\": container with ID starting with e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.580890 4703 scope.go:117] "RemoveContainer" containerID="b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.581174 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c"} err="failed to get container status \"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\": rpc error: code = NotFound desc = could not find container \"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\": container with ID starting with b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.581193 4703 scope.go:117] "RemoveContainer" containerID="e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.581437 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835"} err="failed to get container status \"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\": rpc error: code = NotFound desc = could not find container \"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\": container with ID starting with e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.581456 4703 scope.go:117] "RemoveContainer" containerID="4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.581619 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21"} err="failed to get container status \"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21\": rpc error: code = NotFound desc = could not find container \"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21\": container with ID starting with 4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.581659 4703 scope.go:117] "RemoveContainer" containerID="ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.581839 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"} err="failed to get container status \"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f\": rpc error: code = NotFound desc = could not find container \"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f\": container with ID starting with ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.581952 4703 scope.go:117] "RemoveContainer" containerID="cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.582239 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8"} err="failed to get container status \"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\": rpc error: code = NotFound desc = could not find container \"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\": container with ID starting with cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.582260 4703 scope.go:117] "RemoveContainer" containerID="2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.582604 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0"} err="failed to get container status \"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\": rpc error: code = NotFound desc = could not find container \"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\": container with ID starting with 2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.582626 4703 scope.go:117] "RemoveContainer" containerID="9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.582868 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4"} err="failed to get container status \"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\": rpc error: code = NotFound desc = could not find container \"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\": container with ID starting with 9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.582896 4703 scope.go:117] "RemoveContainer" containerID="d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.583293 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37"} err="failed to get container status \"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\": rpc error: code = NotFound desc = could not find container \"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\": container with ID starting with d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.583364 4703 scope.go:117] "RemoveContainer" containerID="deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.583693 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2"} err="failed to get container status \"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\": rpc error: code = NotFound desc = could not find container \"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\": container with ID starting with deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.583718 4703 scope.go:117] "RemoveContainer" containerID="e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.584152 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e"} err="failed to get container status \"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\": rpc error: code = NotFound desc = could not find container \"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\": container with ID starting with e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.584175 4703 scope.go:117] "RemoveContainer" containerID="b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.584809 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c"} err="failed to get container status \"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\": rpc error: code = NotFound desc = could not find container \"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\": container with ID starting with b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.584834 4703 scope.go:117] "RemoveContainer" containerID="e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.585055 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835"} err="failed to get container status \"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\": rpc error: code = NotFound desc = could not find container \"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\": container with ID starting with e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.585078 4703 scope.go:117] "RemoveContainer" containerID="4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.585457 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21"} err="failed to get container status \"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21\": rpc error: code = NotFound desc = could not find container \"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21\": container with ID starting with 4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.585495 4703 scope.go:117] "RemoveContainer" containerID="ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.586870 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f"} err="failed to get container status \"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f\": rpc error: code = NotFound desc = could not find container \"ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f\": container with ID starting with ce3730feff7c5447d0c1c6f9d4c39c4b16c5fb7f48d4517da5552c5994a8608f not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.586910 4703 scope.go:117] "RemoveContainer" containerID="cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.587844 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8"} err="failed to get container status \"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\": rpc error: code = NotFound desc = could not find container \"cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8\": container with ID starting with cea6262dc0634588605607369decd7b0a783895896cafe7fb0a0a64031813bf8 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.587872 4703 scope.go:117] "RemoveContainer" containerID="2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.588722 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0"} err="failed to get container status \"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\": rpc error: code = NotFound desc = could not find container \"2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0\": container with ID starting with 2194299ced3ab23c1718d92dc56bd07f4bd1e7aa03aca116a0cef32905af04d0 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.588748 4703 scope.go:117] "RemoveContainer" containerID="9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.588956 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4"} err="failed to get container status \"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\": rpc error: code = NotFound desc = could not find container \"9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4\": container with ID starting with 9655abe92b5876f9b8086afc0315f2c2f6e01cc6642bbec0287de3e2f04961d4 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.588983 4703 scope.go:117] "RemoveContainer" containerID="d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.589200 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37"} err="failed to get container status \"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\": rpc error: code = NotFound desc = could not find container \"d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37\": container with ID starting with d95f48e4bff510a70aadc1125de19cd6d9d1fa57ab04df4b4571d563d6cdda37 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.589220 4703 scope.go:117] "RemoveContainer" containerID="deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.589443 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2"} err="failed to get container status \"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\": rpc error: code = NotFound desc = could not find container \"deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2\": container with ID starting with deaaef57135c711f5a242765da8ee672f799157e5be2025b9a6c68298ce670c2 not found: ID does not exist"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.589464 4703 scope.go:117] "RemoveContainer" containerID="e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e"
Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.589623 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e"} err="failed to get container status \"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\": rpc error: code = NotFound desc = could not find
container \"e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e\": container with ID starting with e0b650f1ffb3e0f295e51451011143eabb2fe593dce7db02793865ac443c401e not found: ID does not exist" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.589636 4703 scope.go:117] "RemoveContainer" containerID="b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.589836 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c"} err="failed to get container status \"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\": rpc error: code = NotFound desc = could not find container \"b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c\": container with ID starting with b459a4f059e7a280de07d9c0eb28bcb28d556bef69b7efc2c51ae8f0524f1f6c not found: ID does not exist" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.589860 4703 scope.go:117] "RemoveContainer" containerID="e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.590077 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835"} err="failed to get container status \"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\": rpc error: code = NotFound desc = could not find container \"e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835\": container with ID starting with e228ede5075dd7ed459af72db2c2ca38e5cbd21bd99208aba63902cdc3209835 not found: ID does not exist" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.590104 4703 scope.go:117] "RemoveContainer" containerID="4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21" Jan 30 12:09:40 crc kubenswrapper[4703]: I0130 12:09:40.590515 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21"} err="failed to get container status \"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21\": rpc error: code = NotFound desc = could not find container \"4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21\": container with ID starting with 4cd405693a7206e02711f9d4104d330201c453d12866ce771b5ccdb3aa471d21 not found: ID does not exist" Jan 30 12:09:41 crc kubenswrapper[4703]: I0130 12:09:41.094662 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06ffa267-20b9-4132-9f87-1218b111ebbc" path="/var/lib/kubelet/pods/06ffa267-20b9-4132-9f87-1218b111ebbc/volumes" Jan 30 12:09:41 crc kubenswrapper[4703]: I0130 12:09:41.330796 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-72zlj_874a5df5-f6bd-4111-aefa-f43e43e1fcc0/kube-multus/2.log" Jan 30 12:09:41 crc kubenswrapper[4703]: I0130 12:09:41.331278 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-72zlj_874a5df5-f6bd-4111-aefa-f43e43e1fcc0/kube-multus/1.log" Jan 30 12:09:41 crc kubenswrapper[4703]: I0130 12:09:41.331336 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-72zlj" event={"ID":"874a5df5-f6bd-4111-aefa-f43e43e1fcc0","Type":"ContainerStarted","Data":"810e41d29fcc9247ca712ed68870574982d07461bbd2d23d964fb4d7413375f2"} Jan 30 12:09:41 crc kubenswrapper[4703]: I0130 
12:09:41.334189 4703 generic.go:334] "Generic (PLEG): container finished" podID="5f09de14-9762-4f64-900c-df6bbfea6b78" containerID="832df8ba671061965287c008f6c01ee5dfc1babc62fb953f38e8ae283ccdb04b" exitCode=0 Jan 30 12:09:41 crc kubenswrapper[4703]: I0130 12:09:41.334296 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" event={"ID":"5f09de14-9762-4f64-900c-df6bbfea6b78","Type":"ContainerDied","Data":"832df8ba671061965287c008f6c01ee5dfc1babc62fb953f38e8ae283ccdb04b"} Jan 30 12:09:41 crc kubenswrapper[4703]: I0130 12:09:41.334332 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" event={"ID":"5f09de14-9762-4f64-900c-df6bbfea6b78","Type":"ContainerStarted","Data":"ebec15f2117a3972407d5c97d04524eaf8dca3710b6cc228aa24d6918533949b"} Jan 30 12:09:42 crc kubenswrapper[4703]: I0130 12:09:42.345059 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" event={"ID":"5f09de14-9762-4f64-900c-df6bbfea6b78","Type":"ContainerStarted","Data":"6630c5145228f7daf479f0b67badd3294f8d382ef3077ff8e5dcd7ef0d33687a"} Jan 30 12:09:42 crc kubenswrapper[4703]: I0130 12:09:42.345578 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" event={"ID":"5f09de14-9762-4f64-900c-df6bbfea6b78","Type":"ContainerStarted","Data":"a055949235e99d629424cbcd9f2e307c123788a39676434424bd95e72757f42f"} Jan 30 12:09:42 crc kubenswrapper[4703]: I0130 12:09:42.345593 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" event={"ID":"5f09de14-9762-4f64-900c-df6bbfea6b78","Type":"ContainerStarted","Data":"727873ba8cae1f27ddbea533f508e43ffc232ff4c88698858a1f9889f0e037be"} Jan 30 12:09:42 crc kubenswrapper[4703]: I0130 12:09:42.345638 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" event={"ID":"5f09de14-9762-4f64-900c-df6bbfea6b78","Type":"ContainerStarted","Data":"82517df8cad09ed0be30bbade303d4247e78fa2cdc04edd7d7dc3a881ccdf4a2"} Jan 30 12:09:42 crc kubenswrapper[4703]: I0130 12:09:42.822704 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:09:42 crc kubenswrapper[4703]: I0130 12:09:42.823188 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:09:43 crc kubenswrapper[4703]: I0130 12:09:43.356507 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" event={"ID":"5f09de14-9762-4f64-900c-df6bbfea6b78","Type":"ContainerStarted","Data":"6478c1283943ac1f6ca0f3ca92fc52345c77477c756058775d1bff759de47b94"} Jan 30 12:09:43 crc kubenswrapper[4703]: I0130 12:09:43.356596 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" event={"ID":"5f09de14-9762-4f64-900c-df6bbfea6b78","Type":"ContainerStarted","Data":"bd4073ba432262b11abf26a4c26a3736a26cbf2e26154365798c417ad09fafce"} Jan 30 12:09:45 crc kubenswrapper[4703]: 
I0130 12:09:45.375273 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" event={"ID":"5f09de14-9762-4f64-900c-df6bbfea6b78","Type":"ContainerStarted","Data":"db63a929e4d565d6e1d443e9d5ecb3ea9753ebe1b3d632a45e4e517eb132a117"} Jan 30 12:09:45 crc kubenswrapper[4703]: I0130 12:09:45.853390 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-687f57d79b-fpdbd" Jan 30 12:09:48 crc kubenswrapper[4703]: I0130 12:09:48.418349 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" event={"ID":"5f09de14-9762-4f64-900c-df6bbfea6b78","Type":"ContainerStarted","Data":"622458dd5908cd9702419f41e4613f89ebedc0a59d8eb41485ca9f6bbd3caa5c"} Jan 30 12:09:48 crc kubenswrapper[4703]: I0130 12:09:48.420424 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:48 crc kubenswrapper[4703]: I0130 12:09:48.420450 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:48 crc kubenswrapper[4703]: I0130 12:09:48.420498 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:48 crc kubenswrapper[4703]: I0130 12:09:48.453520 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:48 crc kubenswrapper[4703]: I0130 12:09:48.475064 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:09:48 crc kubenswrapper[4703]: I0130 12:09:48.496509 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" podStartSLOduration=8.496475738 podStartE2EDuration="8.496475738s" podCreationTimestamp="2026-01-30 12:09:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:09:48.454207483 +0000 UTC m=+824.232029137" watchObservedRunningTime="2026-01-30 12:09:48.496475738 +0000 UTC m=+824.274297392" Jan 30 12:10:06 crc kubenswrapper[4703]: I0130 12:10:06.130482 4703 scope.go:117] "RemoveContainer" containerID="af38bdb6fc351ea256fbb5a368b87ad70202821a6a348e230d540c60694cc014" Jan 30 12:10:06 crc kubenswrapper[4703]: I0130 12:10:06.542396 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-72zlj_874a5df5-f6bd-4111-aefa-f43e43e1fcc0/kube-multus/2.log" Jan 30 12:10:10 crc kubenswrapper[4703]: I0130 12:10:10.513169 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-d82gf" Jan 30 12:10:12 crc kubenswrapper[4703]: I0130 12:10:12.823661 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:10:12 crc kubenswrapper[4703]: I0130 12:10:12.824706 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:10:20 crc kubenswrapper[4703]: I0130 12:10:20.472984 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x"] Jan 30 12:10:20 crc kubenswrapper[4703]: I0130 12:10:20.474615 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" Jan 30 12:10:20 crc kubenswrapper[4703]: I0130 12:10:20.477092 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 30 12:10:20 crc kubenswrapper[4703]: I0130 12:10:20.487096 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x"] Jan 30 12:10:20 crc kubenswrapper[4703]: I0130 12:10:20.546052 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/278c6aa2-fe68-47d0-9626-1b8a42157a4c-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x\" (UID: \"278c6aa2-fe68-47d0-9626-1b8a42157a4c\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" Jan 30 12:10:20 crc kubenswrapper[4703]: I0130 12:10:20.546115 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mc4j4\" (UniqueName: \"kubernetes.io/projected/278c6aa2-fe68-47d0-9626-1b8a42157a4c-kube-api-access-mc4j4\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x\" (UID: \"278c6aa2-fe68-47d0-9626-1b8a42157a4c\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" Jan 30 12:10:20 crc kubenswrapper[4703]: I0130 12:10:20.546402 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/278c6aa2-fe68-47d0-9626-1b8a42157a4c-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x\" (UID: \"278c6aa2-fe68-47d0-9626-1b8a42157a4c\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" Jan 30 12:10:20 crc kubenswrapper[4703]: I0130 12:10:20.647470 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/278c6aa2-fe68-47d0-9626-1b8a42157a4c-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x\" (UID: \"278c6aa2-fe68-47d0-9626-1b8a42157a4c\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" Jan 30 12:10:20 crc kubenswrapper[4703]: I0130 12:10:20.647558 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/278c6aa2-fe68-47d0-9626-1b8a42157a4c-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x\" (UID: \"278c6aa2-fe68-47d0-9626-1b8a42157a4c\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" Jan 30 12:10:20 crc kubenswrapper[4703]: I0130 12:10:20.647594 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mc4j4\" (UniqueName: \"kubernetes.io/projected/278c6aa2-fe68-47d0-9626-1b8a42157a4c-kube-api-access-mc4j4\") pod 
\"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x\" (UID: \"278c6aa2-fe68-47d0-9626-1b8a42157a4c\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" Jan 30 12:10:20 crc kubenswrapper[4703]: I0130 12:10:20.649257 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/278c6aa2-fe68-47d0-9626-1b8a42157a4c-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x\" (UID: \"278c6aa2-fe68-47d0-9626-1b8a42157a4c\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" Jan 30 12:10:20 crc kubenswrapper[4703]: I0130 12:10:20.649442 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/278c6aa2-fe68-47d0-9626-1b8a42157a4c-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x\" (UID: \"278c6aa2-fe68-47d0-9626-1b8a42157a4c\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" Jan 30 12:10:20 crc kubenswrapper[4703]: I0130 12:10:20.674977 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mc4j4\" (UniqueName: \"kubernetes.io/projected/278c6aa2-fe68-47d0-9626-1b8a42157a4c-kube-api-access-mc4j4\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x\" (UID: \"278c6aa2-fe68-47d0-9626-1b8a42157a4c\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" Jan 30 12:10:20 crc kubenswrapper[4703]: I0130 12:10:20.849082 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" Jan 30 12:10:21 crc kubenswrapper[4703]: I0130 12:10:21.094809 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x"] Jan 30 12:10:21 crc kubenswrapper[4703]: W0130 12:10:21.099671 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod278c6aa2_fe68_47d0_9626_1b8a42157a4c.slice/crio-131f329cf34363a7569ee85629676ddeb1e1bfbe97988c065b3a70588af884bd WatchSource:0}: Error finding container 131f329cf34363a7569ee85629676ddeb1e1bfbe97988c065b3a70588af884bd: Status 404 returned error can't find the container with id 131f329cf34363a7569ee85629676ddeb1e1bfbe97988c065b3a70588af884bd Jan 30 12:10:21 crc kubenswrapper[4703]: I0130 12:10:21.659466 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" event={"ID":"278c6aa2-fe68-47d0-9626-1b8a42157a4c","Type":"ContainerStarted","Data":"b3ffaaf4bab22e5966af2842718c1eb6a1bbf8568dfa45d3e8707a469e0208b7"} Jan 30 12:10:21 crc kubenswrapper[4703]: I0130 12:10:21.660198 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" event={"ID":"278c6aa2-fe68-47d0-9626-1b8a42157a4c","Type":"ContainerStarted","Data":"131f329cf34363a7569ee85629676ddeb1e1bfbe97988c065b3a70588af884bd"} Jan 30 12:10:22 crc kubenswrapper[4703]: I0130 12:10:22.669245 4703 generic.go:334] "Generic (PLEG): container finished" podID="278c6aa2-fe68-47d0-9626-1b8a42157a4c" containerID="b3ffaaf4bab22e5966af2842718c1eb6a1bbf8568dfa45d3e8707a469e0208b7" exitCode=0 Jan 30 12:10:22 
crc kubenswrapper[4703]: I0130 12:10:22.669353 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" event={"ID":"278c6aa2-fe68-47d0-9626-1b8a42157a4c","Type":"ContainerDied","Data":"b3ffaaf4bab22e5966af2842718c1eb6a1bbf8568dfa45d3e8707a469e0208b7"} Jan 30 12:10:22 crc kubenswrapper[4703]: I0130 12:10:22.810240 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-c2qls"] Jan 30 12:10:22 crc kubenswrapper[4703]: I0130 12:10:22.813139 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c2qls" Jan 30 12:10:22 crc kubenswrapper[4703]: I0130 12:10:22.843593 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c2qls"] Jan 30 12:10:22 crc kubenswrapper[4703]: I0130 12:10:22.880899 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvq5n\" (UniqueName: \"kubernetes.io/projected/d2d417a2-d72a-4f57-a5ed-8f09793d93bb-kube-api-access-rvq5n\") pod \"redhat-operators-c2qls\" (UID: \"d2d417a2-d72a-4f57-a5ed-8f09793d93bb\") " pod="openshift-marketplace/redhat-operators-c2qls" Jan 30 12:10:22 crc kubenswrapper[4703]: I0130 12:10:22.880977 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2d417a2-d72a-4f57-a5ed-8f09793d93bb-catalog-content\") pod \"redhat-operators-c2qls\" (UID: \"d2d417a2-d72a-4f57-a5ed-8f09793d93bb\") " pod="openshift-marketplace/redhat-operators-c2qls" Jan 30 12:10:22 crc kubenswrapper[4703]: I0130 12:10:22.881004 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2d417a2-d72a-4f57-a5ed-8f09793d93bb-utilities\") pod \"redhat-operators-c2qls\" (UID: \"d2d417a2-d72a-4f57-a5ed-8f09793d93bb\") " pod="openshift-marketplace/redhat-operators-c2qls" Jan 30 12:10:22 crc kubenswrapper[4703]: I0130 12:10:22.982543 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvq5n\" (UniqueName: \"kubernetes.io/projected/d2d417a2-d72a-4f57-a5ed-8f09793d93bb-kube-api-access-rvq5n\") pod \"redhat-operators-c2qls\" (UID: \"d2d417a2-d72a-4f57-a5ed-8f09793d93bb\") " pod="openshift-marketplace/redhat-operators-c2qls" Jan 30 12:10:22 crc kubenswrapper[4703]: I0130 12:10:22.982651 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2d417a2-d72a-4f57-a5ed-8f09793d93bb-catalog-content\") pod \"redhat-operators-c2qls\" (UID: \"d2d417a2-d72a-4f57-a5ed-8f09793d93bb\") " pod="openshift-marketplace/redhat-operators-c2qls" Jan 30 12:10:22 crc kubenswrapper[4703]: I0130 12:10:22.982688 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2d417a2-d72a-4f57-a5ed-8f09793d93bb-utilities\") pod \"redhat-operators-c2qls\" (UID: \"d2d417a2-d72a-4f57-a5ed-8f09793d93bb\") " pod="openshift-marketplace/redhat-operators-c2qls" Jan 30 12:10:22 crc kubenswrapper[4703]: I0130 12:10:22.983489 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2d417a2-d72a-4f57-a5ed-8f09793d93bb-utilities\") pod \"redhat-operators-c2qls\" (UID: 
\"d2d417a2-d72a-4f57-a5ed-8f09793d93bb\") " pod="openshift-marketplace/redhat-operators-c2qls" Jan 30 12:10:22 crc kubenswrapper[4703]: I0130 12:10:22.983562 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2d417a2-d72a-4f57-a5ed-8f09793d93bb-catalog-content\") pod \"redhat-operators-c2qls\" (UID: \"d2d417a2-d72a-4f57-a5ed-8f09793d93bb\") " pod="openshift-marketplace/redhat-operators-c2qls" Jan 30 12:10:23 crc kubenswrapper[4703]: I0130 12:10:23.007185 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvq5n\" (UniqueName: \"kubernetes.io/projected/d2d417a2-d72a-4f57-a5ed-8f09793d93bb-kube-api-access-rvq5n\") pod \"redhat-operators-c2qls\" (UID: \"d2d417a2-d72a-4f57-a5ed-8f09793d93bb\") " pod="openshift-marketplace/redhat-operators-c2qls" Jan 30 12:10:23 crc kubenswrapper[4703]: I0130 12:10:23.150002 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c2qls" Jan 30 12:10:23 crc kubenswrapper[4703]: I0130 12:10:23.409911 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-c2qls"] Jan 30 12:10:23 crc kubenswrapper[4703]: I0130 12:10:23.677267 4703 generic.go:334] "Generic (PLEG): container finished" podID="d2d417a2-d72a-4f57-a5ed-8f09793d93bb" containerID="978fd7e83530d193c4c2d43e70f8f2d85d2fcb5404e4fe3bb16c1b07676b1855" exitCode=0 Jan 30 12:10:23 crc kubenswrapper[4703]: I0130 12:10:23.677340 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2qls" event={"ID":"d2d417a2-d72a-4f57-a5ed-8f09793d93bb","Type":"ContainerDied","Data":"978fd7e83530d193c4c2d43e70f8f2d85d2fcb5404e4fe3bb16c1b07676b1855"} Jan 30 12:10:23 crc kubenswrapper[4703]: I0130 12:10:23.677385 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2qls" event={"ID":"d2d417a2-d72a-4f57-a5ed-8f09793d93bb","Type":"ContainerStarted","Data":"bbe556371abdc4f3b175dc022532d815cba06332e81a6f4c05eb25c302ebc7fd"} Jan 30 12:10:24 crc kubenswrapper[4703]: I0130 12:10:24.684980 4703 generic.go:334] "Generic (PLEG): container finished" podID="278c6aa2-fe68-47d0-9626-1b8a42157a4c" containerID="8bde4bf89fd086b9ed1fba8295b1a2f01d93aea55b73664f29b81e5559cfd7d2" exitCode=0 Jan 30 12:10:24 crc kubenswrapper[4703]: I0130 12:10:24.685142 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" event={"ID":"278c6aa2-fe68-47d0-9626-1b8a42157a4c","Type":"ContainerDied","Data":"8bde4bf89fd086b9ed1fba8295b1a2f01d93aea55b73664f29b81e5559cfd7d2"} Jan 30 12:10:24 crc kubenswrapper[4703]: I0130 12:10:24.688798 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2qls" event={"ID":"d2d417a2-d72a-4f57-a5ed-8f09793d93bb","Type":"ContainerStarted","Data":"befb373b0660498d3fd7aaa936857dc42fb57e6b79c4fd85aaded11a1ccff000"} Jan 30 12:10:25 crc kubenswrapper[4703]: I0130 12:10:25.697906 4703 generic.go:334] "Generic (PLEG): container finished" podID="278c6aa2-fe68-47d0-9626-1b8a42157a4c" containerID="1c83cbfec95574fb486dc43cad554a5e0700e466381eeafd13d51b7c74b5a9da" exitCode=0 Jan 30 12:10:25 crc kubenswrapper[4703]: I0130 12:10:25.698012 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" 
event={"ID":"278c6aa2-fe68-47d0-9626-1b8a42157a4c","Type":"ContainerDied","Data":"1c83cbfec95574fb486dc43cad554a5e0700e466381eeafd13d51b7c74b5a9da"} Jan 30 12:10:26 crc kubenswrapper[4703]: I0130 12:10:26.705802 4703 generic.go:334] "Generic (PLEG): container finished" podID="d2d417a2-d72a-4f57-a5ed-8f09793d93bb" containerID="befb373b0660498d3fd7aaa936857dc42fb57e6b79c4fd85aaded11a1ccff000" exitCode=0 Jan 30 12:10:26 crc kubenswrapper[4703]: I0130 12:10:26.706514 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2qls" event={"ID":"d2d417a2-d72a-4f57-a5ed-8f09793d93bb","Type":"ContainerDied","Data":"befb373b0660498d3fd7aaa936857dc42fb57e6b79c4fd85aaded11a1ccff000"} Jan 30 12:10:27 crc kubenswrapper[4703]: I0130 12:10:27.001036 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" Jan 30 12:10:27 crc kubenswrapper[4703]: I0130 12:10:27.148117 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/278c6aa2-fe68-47d0-9626-1b8a42157a4c-util\") pod \"278c6aa2-fe68-47d0-9626-1b8a42157a4c\" (UID: \"278c6aa2-fe68-47d0-9626-1b8a42157a4c\") " Jan 30 12:10:27 crc kubenswrapper[4703]: I0130 12:10:27.148212 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mc4j4\" (UniqueName: \"kubernetes.io/projected/278c6aa2-fe68-47d0-9626-1b8a42157a4c-kube-api-access-mc4j4\") pod \"278c6aa2-fe68-47d0-9626-1b8a42157a4c\" (UID: \"278c6aa2-fe68-47d0-9626-1b8a42157a4c\") " Jan 30 12:10:27 crc kubenswrapper[4703]: I0130 12:10:27.148303 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/278c6aa2-fe68-47d0-9626-1b8a42157a4c-bundle\") pod \"278c6aa2-fe68-47d0-9626-1b8a42157a4c\" (UID: \"278c6aa2-fe68-47d0-9626-1b8a42157a4c\") " Jan 30 12:10:27 crc kubenswrapper[4703]: I0130 12:10:27.150931 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/278c6aa2-fe68-47d0-9626-1b8a42157a4c-bundle" (OuterVolumeSpecName: "bundle") pod "278c6aa2-fe68-47d0-9626-1b8a42157a4c" (UID: "278c6aa2-fe68-47d0-9626-1b8a42157a4c"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:10:27 crc kubenswrapper[4703]: I0130 12:10:27.156106 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/278c6aa2-fe68-47d0-9626-1b8a42157a4c-kube-api-access-mc4j4" (OuterVolumeSpecName: "kube-api-access-mc4j4") pod "278c6aa2-fe68-47d0-9626-1b8a42157a4c" (UID: "278c6aa2-fe68-47d0-9626-1b8a42157a4c"). InnerVolumeSpecName "kube-api-access-mc4j4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:10:27 crc kubenswrapper[4703]: I0130 12:10:27.167109 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/278c6aa2-fe68-47d0-9626-1b8a42157a4c-util" (OuterVolumeSpecName: "util") pod "278c6aa2-fe68-47d0-9626-1b8a42157a4c" (UID: "278c6aa2-fe68-47d0-9626-1b8a42157a4c"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:10:27 crc kubenswrapper[4703]: I0130 12:10:27.250304 4703 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/278c6aa2-fe68-47d0-9626-1b8a42157a4c-util\") on node \"crc\" DevicePath \"\"" Jan 30 12:10:27 crc kubenswrapper[4703]: I0130 12:10:27.250345 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mc4j4\" (UniqueName: \"kubernetes.io/projected/278c6aa2-fe68-47d0-9626-1b8a42157a4c-kube-api-access-mc4j4\") on node \"crc\" DevicePath \"\"" Jan 30 12:10:27 crc kubenswrapper[4703]: I0130 12:10:27.250356 4703 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/278c6aa2-fe68-47d0-9626-1b8a42157a4c-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:10:27 crc kubenswrapper[4703]: I0130 12:10:27.715439 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" event={"ID":"278c6aa2-fe68-47d0-9626-1b8a42157a4c","Type":"ContainerDied","Data":"131f329cf34363a7569ee85629676ddeb1e1bfbe97988c065b3a70588af884bd"} Jan 30 12:10:27 crc kubenswrapper[4703]: I0130 12:10:27.715491 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x" Jan 30 12:10:27 crc kubenswrapper[4703]: I0130 12:10:27.715507 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="131f329cf34363a7569ee85629676ddeb1e1bfbe97988c065b3a70588af884bd" Jan 30 12:10:27 crc kubenswrapper[4703]: I0130 12:10:27.718571 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2qls" event={"ID":"d2d417a2-d72a-4f57-a5ed-8f09793d93bb","Type":"ContainerStarted","Data":"850a1e55e0b4d476d30f27880a1d9931234da437f668d3c917875be9ca1aa1ff"} Jan 30 12:10:27 crc kubenswrapper[4703]: I0130 12:10:27.744044 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-c2qls" podStartSLOduration=2.261039177 podStartE2EDuration="5.744014088s" podCreationTimestamp="2026-01-30 12:10:22 +0000 UTC" firstStartedPulling="2026-01-30 12:10:23.811645257 +0000 UTC m=+859.589466911" lastFinishedPulling="2026-01-30 12:10:27.294620168 +0000 UTC m=+863.072441822" observedRunningTime="2026-01-30 12:10:27.739828616 +0000 UTC m=+863.517650290" watchObservedRunningTime="2026-01-30 12:10:27.744014088 +0000 UTC m=+863.521835742" Jan 30 12:10:33 crc kubenswrapper[4703]: I0130 12:10:33.163591 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-c2qls" Jan 30 12:10:33 crc kubenswrapper[4703]: I0130 12:10:33.164472 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-c2qls" Jan 30 12:10:34 crc kubenswrapper[4703]: I0130 12:10:34.224739 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-c2qls" podUID="d2d417a2-d72a-4f57-a5ed-8f09793d93bb" containerName="registry-server" probeResult="failure" output=< Jan 30 12:10:34 crc kubenswrapper[4703]: timeout: failed to connect service ":50051" within 1s Jan 30 12:10:34 crc kubenswrapper[4703]: > Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.381488 4703 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-2dr52"] Jan 30 12:10:38 crc kubenswrapper[4703]: E0130 12:10:38.382706 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="278c6aa2-fe68-47d0-9626-1b8a42157a4c" containerName="pull" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.382816 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="278c6aa2-fe68-47d0-9626-1b8a42157a4c" containerName="pull" Jan 30 12:10:38 crc kubenswrapper[4703]: E0130 12:10:38.382885 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="278c6aa2-fe68-47d0-9626-1b8a42157a4c" containerName="extract" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.382940 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="278c6aa2-fe68-47d0-9626-1b8a42157a4c" containerName="extract" Jan 30 12:10:38 crc kubenswrapper[4703]: E0130 12:10:38.383000 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="278c6aa2-fe68-47d0-9626-1b8a42157a4c" containerName="util" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.383052 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="278c6aa2-fe68-47d0-9626-1b8a42157a4c" containerName="util" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.383291 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="278c6aa2-fe68-47d0-9626-1b8a42157a4c" containerName="extract" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.383968 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-2dr52" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.386232 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-cn2x8" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.386704 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.386887 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.399560 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-2dr52"] Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.506529 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t69gn\" (UniqueName: \"kubernetes.io/projected/1bba418c-e14f-4710-b936-9431dee41382-kube-api-access-t69gn\") pod \"obo-prometheus-operator-68bc856cb9-2dr52\" (UID: \"1bba418c-e14f-4710-b936-9431dee41382\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-2dr52" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.521780 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w"] Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.523002 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.528794 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.528883 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-qf6d2" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.541268 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4"] Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.550342 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.556727 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w"] Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.608015 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t69gn\" (UniqueName: \"kubernetes.io/projected/1bba418c-e14f-4710-b936-9431dee41382-kube-api-access-t69gn\") pod \"obo-prometheus-operator-68bc856cb9-2dr52\" (UID: \"1bba418c-e14f-4710-b936-9431dee41382\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-2dr52" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.627778 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4"] Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.643369 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t69gn\" (UniqueName: \"kubernetes.io/projected/1bba418c-e14f-4710-b936-9431dee41382-kube-api-access-t69gn\") pod \"obo-prometheus-operator-68bc856cb9-2dr52\" (UID: \"1bba418c-e14f-4710-b936-9431dee41382\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-2dr52" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.709879 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/51d9f47f-4fa1-4632-be7a-f92fcb054ae6-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w\" (UID: \"51d9f47f-4fa1-4632-be7a-f92fcb054ae6\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.709957 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f5d72830-ff3a-4578-9e66-c738cbf1a7cf-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4\" (UID: \"f5d72830-ff3a-4578-9e66-c738cbf1a7cf\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.709993 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/51d9f47f-4fa1-4632-be7a-f92fcb054ae6-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w\" (UID: 
\"51d9f47f-4fa1-4632-be7a-f92fcb054ae6\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.710046 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f5d72830-ff3a-4578-9e66-c738cbf1a7cf-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4\" (UID: \"f5d72830-ff3a-4578-9e66-c738cbf1a7cf\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.729981 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-skwqz"] Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.731166 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-skwqz" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.732490 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-2dr52" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.734831 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-24w2p" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.737218 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.810253 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-skwqz"] Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.811057 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f5d72830-ff3a-4578-9e66-c738cbf1a7cf-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4\" (UID: \"f5d72830-ff3a-4578-9e66-c738cbf1a7cf\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.811213 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/51d9f47f-4fa1-4632-be7a-f92fcb054ae6-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w\" (UID: \"51d9f47f-4fa1-4632-be7a-f92fcb054ae6\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.811238 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f5d72830-ff3a-4578-9e66-c738cbf1a7cf-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4\" (UID: \"f5d72830-ff3a-4578-9e66-c738cbf1a7cf\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.811275 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/51d9f47f-4fa1-4632-be7a-f92fcb054ae6-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w\" (UID: \"51d9f47f-4fa1-4632-be7a-f92fcb054ae6\") " 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.820998 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f5d72830-ff3a-4578-9e66-c738cbf1a7cf-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4\" (UID: \"f5d72830-ff3a-4578-9e66-c738cbf1a7cf\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.822153 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/51d9f47f-4fa1-4632-be7a-f92fcb054ae6-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w\" (UID: \"51d9f47f-4fa1-4632-be7a-f92fcb054ae6\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.830763 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/51d9f47f-4fa1-4632-be7a-f92fcb054ae6-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w\" (UID: \"51d9f47f-4fa1-4632-be7a-f92fcb054ae6\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.831830 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f5d72830-ff3a-4578-9e66-c738cbf1a7cf-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4\" (UID: \"f5d72830-ff3a-4578-9e66-c738cbf1a7cf\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.843998 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.912533 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/4bd5df43-64da-4ab9-8006-83056b6d0d5e-observability-operator-tls\") pod \"observability-operator-59bdc8b94-skwqz\" (UID: \"4bd5df43-64da-4ab9-8006-83056b6d0d5e\") " pod="openshift-operators/observability-operator-59bdc8b94-skwqz" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.912957 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gp7mh\" (UniqueName: \"kubernetes.io/projected/4bd5df43-64da-4ab9-8006-83056b6d0d5e-kube-api-access-gp7mh\") pod \"observability-operator-59bdc8b94-skwqz\" (UID: \"4bd5df43-64da-4ab9-8006-83056b6d0d5e\") " pod="openshift-operators/observability-operator-59bdc8b94-skwqz" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.916203 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.951916 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-n9zw2"] Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.963417 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-n9zw2" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.976427 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-t9c56" Jan 30 12:10:38 crc kubenswrapper[4703]: I0130 12:10:38.985550 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-n9zw2"] Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.014056 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gp7mh\" (UniqueName: \"kubernetes.io/projected/4bd5df43-64da-4ab9-8006-83056b6d0d5e-kube-api-access-gp7mh\") pod \"observability-operator-59bdc8b94-skwqz\" (UID: \"4bd5df43-64da-4ab9-8006-83056b6d0d5e\") " pod="openshift-operators/observability-operator-59bdc8b94-skwqz" Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.014167 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/4bd5df43-64da-4ab9-8006-83056b6d0d5e-observability-operator-tls\") pod \"observability-operator-59bdc8b94-skwqz\" (UID: \"4bd5df43-64da-4ab9-8006-83056b6d0d5e\") " pod="openshift-operators/observability-operator-59bdc8b94-skwqz" Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.021474 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/4bd5df43-64da-4ab9-8006-83056b6d0d5e-observability-operator-tls\") pod \"observability-operator-59bdc8b94-skwqz\" (UID: \"4bd5df43-64da-4ab9-8006-83056b6d0d5e\") " pod="openshift-operators/observability-operator-59bdc8b94-skwqz" Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.037263 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gp7mh\" (UniqueName: \"kubernetes.io/projected/4bd5df43-64da-4ab9-8006-83056b6d0d5e-kube-api-access-gp7mh\") pod \"observability-operator-59bdc8b94-skwqz\" (UID: \"4bd5df43-64da-4ab9-8006-83056b6d0d5e\") " pod="openshift-operators/observability-operator-59bdc8b94-skwqz" Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.052834 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-skwqz" Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.115575 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/74ad7d1c-6e7a-4f7e-a441-e301ec3593ab-openshift-service-ca\") pod \"perses-operator-5bf474d74f-n9zw2\" (UID: \"74ad7d1c-6e7a-4f7e-a441-e301ec3593ab\") " pod="openshift-operators/perses-operator-5bf474d74f-n9zw2" Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.116070 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6sw9k\" (UniqueName: \"kubernetes.io/projected/74ad7d1c-6e7a-4f7e-a441-e301ec3593ab-kube-api-access-6sw9k\") pod \"perses-operator-5bf474d74f-n9zw2\" (UID: \"74ad7d1c-6e7a-4f7e-a441-e301ec3593ab\") " pod="openshift-operators/perses-operator-5bf474d74f-n9zw2" Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.208452 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-2dr52"] Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.217347 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/74ad7d1c-6e7a-4f7e-a441-e301ec3593ab-openshift-service-ca\") pod \"perses-operator-5bf474d74f-n9zw2\" (UID: \"74ad7d1c-6e7a-4f7e-a441-e301ec3593ab\") " pod="openshift-operators/perses-operator-5bf474d74f-n9zw2" Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.217434 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6sw9k\" (UniqueName: \"kubernetes.io/projected/74ad7d1c-6e7a-4f7e-a441-e301ec3593ab-kube-api-access-6sw9k\") pod \"perses-operator-5bf474d74f-n9zw2\" (UID: \"74ad7d1c-6e7a-4f7e-a441-e301ec3593ab\") " pod="openshift-operators/perses-operator-5bf474d74f-n9zw2" Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.218715 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/74ad7d1c-6e7a-4f7e-a441-e301ec3593ab-openshift-service-ca\") pod \"perses-operator-5bf474d74f-n9zw2\" (UID: \"74ad7d1c-6e7a-4f7e-a441-e301ec3593ab\") " pod="openshift-operators/perses-operator-5bf474d74f-n9zw2" Jan 30 12:10:39 crc kubenswrapper[4703]: W0130 12:10:39.245595 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1bba418c_e14f_4710_b936_9431dee41382.slice/crio-f909b32b2909ecfd4b497e8ce2d1fbe5a0d36be20edcb20b266ff9bff2f8b3c7 WatchSource:0}: Error finding container f909b32b2909ecfd4b497e8ce2d1fbe5a0d36be20edcb20b266ff9bff2f8b3c7: Status 404 returned error can't find the container with id f909b32b2909ecfd4b497e8ce2d1fbe5a0d36be20edcb20b266ff9bff2f8b3c7 Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.250330 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6sw9k\" (UniqueName: \"kubernetes.io/projected/74ad7d1c-6e7a-4f7e-a441-e301ec3593ab-kube-api-access-6sw9k\") pod \"perses-operator-5bf474d74f-n9zw2\" (UID: \"74ad7d1c-6e7a-4f7e-a441-e301ec3593ab\") " pod="openshift-operators/perses-operator-5bf474d74f-n9zw2" Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.297511 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-n9zw2" Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.481738 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w"] Jan 30 12:10:39 crc kubenswrapper[4703]: W0130 12:10:39.505628 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod51d9f47f_4fa1_4632_be7a_f92fcb054ae6.slice/crio-fcbbd39ed5154744559ebaee90166aa05e2a6fd0620a73e867d29fff8f3db6fd WatchSource:0}: Error finding container fcbbd39ed5154744559ebaee90166aa05e2a6fd0620a73e867d29fff8f3db6fd: Status 404 returned error can't find the container with id fcbbd39ed5154744559ebaee90166aa05e2a6fd0620a73e867d29fff8f3db6fd Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.529084 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-skwqz"] Jan 30 12:10:39 crc kubenswrapper[4703]: W0130 12:10:39.538325 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4bd5df43_64da_4ab9_8006_83056b6d0d5e.slice/crio-9553b7e6495a6c79f96d414e9dcaa0bcb43aac156882f86547b103354862441d WatchSource:0}: Error finding container 9553b7e6495a6c79f96d414e9dcaa0bcb43aac156882f86547b103354862441d: Status 404 returned error can't find the container with id 9553b7e6495a6c79f96d414e9dcaa0bcb43aac156882f86547b103354862441d Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.550281 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4"] Jan 30 12:10:39 crc kubenswrapper[4703]: W0130 12:10:39.589777 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf5d72830_ff3a_4578_9e66_c738cbf1a7cf.slice/crio-bf7289523e330667342074743b729e37d4873626a70e75fb2b1b7ece79f89727 WatchSource:0}: Error finding container bf7289523e330667342074743b729e37d4873626a70e75fb2b1b7ece79f89727: Status 404 returned error can't find the container with id bf7289523e330667342074743b729e37d4873626a70e75fb2b1b7ece79f89727 Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.805748 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4" event={"ID":"f5d72830-ff3a-4578-9e66-c738cbf1a7cf","Type":"ContainerStarted","Data":"bf7289523e330667342074743b729e37d4873626a70e75fb2b1b7ece79f89727"} Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.807807 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w" event={"ID":"51d9f47f-4fa1-4632-be7a-f92fcb054ae6","Type":"ContainerStarted","Data":"fcbbd39ed5154744559ebaee90166aa05e2a6fd0620a73e867d29fff8f3db6fd"} Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.809145 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-2dr52" event={"ID":"1bba418c-e14f-4710-b936-9431dee41382","Type":"ContainerStarted","Data":"f909b32b2909ecfd4b497e8ce2d1fbe5a0d36be20edcb20b266ff9bff2f8b3c7"} Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.810666 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-skwqz" 
event={"ID":"4bd5df43-64da-4ab9-8006-83056b6d0d5e","Type":"ContainerStarted","Data":"9553b7e6495a6c79f96d414e9dcaa0bcb43aac156882f86547b103354862441d"} Jan 30 12:10:39 crc kubenswrapper[4703]: I0130 12:10:39.901879 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-n9zw2"] Jan 30 12:10:39 crc kubenswrapper[4703]: W0130 12:10:39.906548 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod74ad7d1c_6e7a_4f7e_a441_e301ec3593ab.slice/crio-65662ffa89ddf4a9a177b877e9e937420a4ae9cf0264c5f45b593e60206f723f WatchSource:0}: Error finding container 65662ffa89ddf4a9a177b877e9e937420a4ae9cf0264c5f45b593e60206f723f: Status 404 returned error can't find the container with id 65662ffa89ddf4a9a177b877e9e937420a4ae9cf0264c5f45b593e60206f723f Jan 30 12:10:40 crc kubenswrapper[4703]: I0130 12:10:40.891835 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-n9zw2" event={"ID":"74ad7d1c-6e7a-4f7e-a441-e301ec3593ab","Type":"ContainerStarted","Data":"65662ffa89ddf4a9a177b877e9e937420a4ae9cf0264c5f45b593e60206f723f"} Jan 30 12:10:42 crc kubenswrapper[4703]: I0130 12:10:42.825099 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:10:42 crc kubenswrapper[4703]: I0130 12:10:42.825684 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:10:42 crc kubenswrapper[4703]: I0130 12:10:42.825903 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 12:10:42 crc kubenswrapper[4703]: I0130 12:10:42.826737 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"55a18e70c04b9c0432452c8dbe489a57bb034e3a138ce8caf3e700f751921742"} pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 12:10:42 crc kubenswrapper[4703]: I0130 12:10:42.826802 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" containerID="cri-o://55a18e70c04b9c0432452c8dbe489a57bb034e3a138ce8caf3e700f751921742" gracePeriod=600 Jan 30 12:10:43 crc kubenswrapper[4703]: I0130 12:10:43.301024 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-c2qls" Jan 30 12:10:43 crc kubenswrapper[4703]: I0130 12:10:43.376238 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-c2qls" Jan 30 12:10:44 crc kubenswrapper[4703]: I0130 12:10:44.481175 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c2qls"] Jan 30 12:10:44 crc 
kubenswrapper[4703]: I0130 12:10:44.522554 4703 generic.go:334] "Generic (PLEG): container finished" podID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerID="55a18e70c04b9c0432452c8dbe489a57bb034e3a138ce8caf3e700f751921742" exitCode=0 Jan 30 12:10:44 crc kubenswrapper[4703]: I0130 12:10:44.522888 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerDied","Data":"55a18e70c04b9c0432452c8dbe489a57bb034e3a138ce8caf3e700f751921742"} Jan 30 12:10:44 crc kubenswrapper[4703]: I0130 12:10:44.523005 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerStarted","Data":"cc192085768faef3bd05075caea9f2c24722e52bca08578d68a4a914317757f0"} Jan 30 12:10:44 crc kubenswrapper[4703]: I0130 12:10:44.523028 4703 scope.go:117] "RemoveContainer" containerID="2897b756e6469910e024f9272f3f823e188d794a43ea99cbf356f2af1315b70e" Jan 30 12:10:45 crc kubenswrapper[4703]: I0130 12:10:45.532150 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-c2qls" podUID="d2d417a2-d72a-4f57-a5ed-8f09793d93bb" containerName="registry-server" containerID="cri-o://850a1e55e0b4d476d30f27880a1d9931234da437f668d3c917875be9ca1aa1ff" gracePeriod=2 Jan 30 12:10:46 crc kubenswrapper[4703]: I0130 12:10:46.556453 4703 generic.go:334] "Generic (PLEG): container finished" podID="d2d417a2-d72a-4f57-a5ed-8f09793d93bb" containerID="850a1e55e0b4d476d30f27880a1d9931234da437f668d3c917875be9ca1aa1ff" exitCode=0 Jan 30 12:10:46 crc kubenswrapper[4703]: I0130 12:10:46.556977 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2qls" event={"ID":"d2d417a2-d72a-4f57-a5ed-8f09793d93bb","Type":"ContainerDied","Data":"850a1e55e0b4d476d30f27880a1d9931234da437f668d3c917875be9ca1aa1ff"} Jan 30 12:10:50 crc kubenswrapper[4703]: I0130 12:10:50.084335 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-c2qls" Jan 30 12:10:50 crc kubenswrapper[4703]: I0130 12:10:50.094075 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvq5n\" (UniqueName: \"kubernetes.io/projected/d2d417a2-d72a-4f57-a5ed-8f09793d93bb-kube-api-access-rvq5n\") pod \"d2d417a2-d72a-4f57-a5ed-8f09793d93bb\" (UID: \"d2d417a2-d72a-4f57-a5ed-8f09793d93bb\") " Jan 30 12:10:50 crc kubenswrapper[4703]: I0130 12:10:50.094157 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2d417a2-d72a-4f57-a5ed-8f09793d93bb-utilities\") pod \"d2d417a2-d72a-4f57-a5ed-8f09793d93bb\" (UID: \"d2d417a2-d72a-4f57-a5ed-8f09793d93bb\") " Jan 30 12:10:50 crc kubenswrapper[4703]: I0130 12:10:50.094232 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2d417a2-d72a-4f57-a5ed-8f09793d93bb-catalog-content\") pod \"d2d417a2-d72a-4f57-a5ed-8f09793d93bb\" (UID: \"d2d417a2-d72a-4f57-a5ed-8f09793d93bb\") " Jan 30 12:10:50 crc kubenswrapper[4703]: I0130 12:10:50.095139 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2d417a2-d72a-4f57-a5ed-8f09793d93bb-utilities" (OuterVolumeSpecName: "utilities") pod "d2d417a2-d72a-4f57-a5ed-8f09793d93bb" (UID: "d2d417a2-d72a-4f57-a5ed-8f09793d93bb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:10:50 crc kubenswrapper[4703]: I0130 12:10:50.134380 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2d417a2-d72a-4f57-a5ed-8f09793d93bb-kube-api-access-rvq5n" (OuterVolumeSpecName: "kube-api-access-rvq5n") pod "d2d417a2-d72a-4f57-a5ed-8f09793d93bb" (UID: "d2d417a2-d72a-4f57-a5ed-8f09793d93bb"). InnerVolumeSpecName "kube-api-access-rvq5n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:10:50 crc kubenswrapper[4703]: I0130 12:10:50.196380 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvq5n\" (UniqueName: \"kubernetes.io/projected/d2d417a2-d72a-4f57-a5ed-8f09793d93bb-kube-api-access-rvq5n\") on node \"crc\" DevicePath \"\"" Jan 30 12:10:50 crc kubenswrapper[4703]: I0130 12:10:50.196433 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2d417a2-d72a-4f57-a5ed-8f09793d93bb-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:10:50 crc kubenswrapper[4703]: I0130 12:10:50.251998 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2d417a2-d72a-4f57-a5ed-8f09793d93bb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d2d417a2-d72a-4f57-a5ed-8f09793d93bb" (UID: "d2d417a2-d72a-4f57-a5ed-8f09793d93bb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:10:50 crc kubenswrapper[4703]: I0130 12:10:50.297941 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2d417a2-d72a-4f57-a5ed-8f09793d93bb-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:10:50 crc kubenswrapper[4703]: I0130 12:10:50.602841 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-c2qls" event={"ID":"d2d417a2-d72a-4f57-a5ed-8f09793d93bb","Type":"ContainerDied","Data":"bbe556371abdc4f3b175dc022532d815cba06332e81a6f4c05eb25c302ebc7fd"} Jan 30 12:10:50 crc kubenswrapper[4703]: I0130 12:10:50.602928 4703 scope.go:117] "RemoveContainer" containerID="850a1e55e0b4d476d30f27880a1d9931234da437f668d3c917875be9ca1aa1ff" Jan 30 12:10:50 crc kubenswrapper[4703]: I0130 12:10:50.602993 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-c2qls" Jan 30 12:10:50 crc kubenswrapper[4703]: I0130 12:10:50.646591 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-c2qls"] Jan 30 12:10:50 crc kubenswrapper[4703]: I0130 12:10:50.656589 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-c2qls"] Jan 30 12:10:51 crc kubenswrapper[4703]: I0130 12:10:51.096684 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2d417a2-d72a-4f57-a5ed-8f09793d93bb" path="/var/lib/kubelet/pods/d2d417a2-d72a-4f57-a5ed-8f09793d93bb/volumes" Jan 30 12:10:56 crc kubenswrapper[4703]: I0130 12:10:56.034342 4703 scope.go:117] "RemoveContainer" containerID="befb373b0660498d3fd7aaa936857dc42fb57e6b79c4fd85aaded11a1ccff000" Jan 30 12:10:56 crc kubenswrapper[4703]: I0130 12:10:56.098943 4703 scope.go:117] "RemoveContainer" containerID="978fd7e83530d193c4c2d43e70f8f2d85d2fcb5404e4fe3bb16c1b07676b1855" Jan 30 12:10:56 crc kubenswrapper[4703]: I0130 12:10:56.646049 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-skwqz" event={"ID":"4bd5df43-64da-4ab9-8006-83056b6d0d5e","Type":"ContainerStarted","Data":"fbfd9d15db414a254dd3ce95bee9a86cfc3cc480d68d00fa61278e2e67b366f7"} Jan 30 12:10:56 crc kubenswrapper[4703]: I0130 12:10:56.646529 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-59bdc8b94-skwqz" Jan 30 12:10:56 crc kubenswrapper[4703]: I0130 12:10:56.647802 4703 patch_prober.go:28] interesting pod/observability-operator-59bdc8b94-skwqz container/operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.36:8081/healthz\": dial tcp 10.217.0.36:8081: connect: connection refused" start-of-body= Jan 30 12:10:56 crc kubenswrapper[4703]: I0130 12:10:56.647868 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/observability-operator-59bdc8b94-skwqz" podUID="4bd5df43-64da-4ab9-8006-83056b6d0d5e" containerName="operator" probeResult="failure" output="Get \"http://10.217.0.36:8081/healthz\": dial tcp 10.217.0.36:8081: connect: connection refused" Jan 30 12:10:56 crc kubenswrapper[4703]: I0130 12:10:56.649162 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-2dr52" 
event={"ID":"1bba418c-e14f-4710-b936-9431dee41382","Type":"ContainerStarted","Data":"1c90b8ee07d344811da705f91a66f66ab6b2da7b3c71fce7629711d6aefb9a3e"} Jan 30 12:10:56 crc kubenswrapper[4703]: I0130 12:10:56.652780 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4" event={"ID":"f5d72830-ff3a-4578-9e66-c738cbf1a7cf","Type":"ContainerStarted","Data":"e59b1648befd7fca78458fbac6e49afada2ae89b387075c5d0204afee9728305"} Jan 30 12:10:56 crc kubenswrapper[4703]: I0130 12:10:56.655288 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w" event={"ID":"51d9f47f-4fa1-4632-be7a-f92fcb054ae6","Type":"ContainerStarted","Data":"82d477bb0a52983155a44c2465250b298823f358fbe135877bd8ca9c4db76af5"} Jan 30 12:10:56 crc kubenswrapper[4703]: I0130 12:10:56.657853 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-n9zw2" event={"ID":"74ad7d1c-6e7a-4f7e-a441-e301ec3593ab","Type":"ContainerStarted","Data":"e4d67cbf46091f7cd074997fb1e4deffd5ed8e7c4da7ce5c94456c85ba85d64c"} Jan 30 12:10:56 crc kubenswrapper[4703]: I0130 12:10:56.658042 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5bf474d74f-n9zw2" Jan 30 12:10:56 crc kubenswrapper[4703]: I0130 12:10:56.702044 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-59bdc8b94-skwqz" podStartSLOduration=2.067948453 podStartE2EDuration="18.702012115s" podCreationTimestamp="2026-01-30 12:10:38 +0000 UTC" firstStartedPulling="2026-01-30 12:10:39.555425517 +0000 UTC m=+875.333247171" lastFinishedPulling="2026-01-30 12:10:56.189489179 +0000 UTC m=+891.967310833" observedRunningTime="2026-01-30 12:10:56.68988126 +0000 UTC m=+892.467702934" watchObservedRunningTime="2026-01-30 12:10:56.702012115 +0000 UTC m=+892.479833779" Jan 30 12:10:56 crc kubenswrapper[4703]: I0130 12:10:56.757659 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4" podStartSLOduration=2.288919878 podStartE2EDuration="18.757629199s" podCreationTimestamp="2026-01-30 12:10:38 +0000 UTC" firstStartedPulling="2026-01-30 12:10:39.59504274 +0000 UTC m=+875.372864394" lastFinishedPulling="2026-01-30 12:10:56.063752071 +0000 UTC m=+891.841573715" observedRunningTime="2026-01-30 12:10:56.748497184 +0000 UTC m=+892.526318848" watchObservedRunningTime="2026-01-30 12:10:56.757629199 +0000 UTC m=+892.535450853" Jan 30 12:10:56 crc kubenswrapper[4703]: I0130 12:10:56.816568 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w" podStartSLOduration=2.230677933 podStartE2EDuration="18.816544741s" podCreationTimestamp="2026-01-30 12:10:38 +0000 UTC" firstStartedPulling="2026-01-30 12:10:39.511844376 +0000 UTC m=+875.289666030" lastFinishedPulling="2026-01-30 12:10:56.097711184 +0000 UTC m=+891.875532838" observedRunningTime="2026-01-30 12:10:56.780980446 +0000 UTC m=+892.558802110" watchObservedRunningTime="2026-01-30 12:10:56.816544741 +0000 UTC m=+892.594366385" Jan 30 12:10:56 crc kubenswrapper[4703]: I0130 12:10:56.839348 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5bf474d74f-n9zw2" 
podStartSLOduration=2.651401693 podStartE2EDuration="18.839310922s" podCreationTimestamp="2026-01-30 12:10:38 +0000 UTC" firstStartedPulling="2026-01-30 12:10:39.909923958 +0000 UTC m=+875.687745612" lastFinishedPulling="2026-01-30 12:10:56.097833177 +0000 UTC m=+891.875654841" observedRunningTime="2026-01-30 12:10:56.833197759 +0000 UTC m=+892.611019413" watchObservedRunningTime="2026-01-30 12:10:56.839310922 +0000 UTC m=+892.617132576" Jan 30 12:10:56 crc kubenswrapper[4703]: I0130 12:10:56.899700 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-2dr52" podStartSLOduration=2.055074817 podStartE2EDuration="18.899669914s" podCreationTimestamp="2026-01-30 12:10:38 +0000 UTC" firstStartedPulling="2026-01-30 12:10:39.253110157 +0000 UTC m=+875.030931811" lastFinishedPulling="2026-01-30 12:10:56.097705254 +0000 UTC m=+891.875526908" observedRunningTime="2026-01-30 12:10:56.876695317 +0000 UTC m=+892.654516971" watchObservedRunningTime="2026-01-30 12:10:56.899669914 +0000 UTC m=+892.677491568" Jan 30 12:10:57 crc kubenswrapper[4703]: I0130 12:10:57.669817 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-59bdc8b94-skwqz" Jan 30 12:11:09 crc kubenswrapper[4703]: I0130 12:11:09.301100 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5bf474d74f-n9zw2" Jan 30 12:11:26 crc kubenswrapper[4703]: I0130 12:11:26.962970 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4"] Jan 30 12:11:26 crc kubenswrapper[4703]: E0130 12:11:26.963995 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2d417a2-d72a-4f57-a5ed-8f09793d93bb" containerName="registry-server" Jan 30 12:11:26 crc kubenswrapper[4703]: I0130 12:11:26.964022 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2d417a2-d72a-4f57-a5ed-8f09793d93bb" containerName="registry-server" Jan 30 12:11:26 crc kubenswrapper[4703]: E0130 12:11:26.964037 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2d417a2-d72a-4f57-a5ed-8f09793d93bb" containerName="extract-content" Jan 30 12:11:26 crc kubenswrapper[4703]: I0130 12:11:26.964045 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2d417a2-d72a-4f57-a5ed-8f09793d93bb" containerName="extract-content" Jan 30 12:11:26 crc kubenswrapper[4703]: E0130 12:11:26.964068 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2d417a2-d72a-4f57-a5ed-8f09793d93bb" containerName="extract-utilities" Jan 30 12:11:26 crc kubenswrapper[4703]: I0130 12:11:26.964075 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2d417a2-d72a-4f57-a5ed-8f09793d93bb" containerName="extract-utilities" Jan 30 12:11:26 crc kubenswrapper[4703]: I0130 12:11:26.964200 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2d417a2-d72a-4f57-a5ed-8f09793d93bb" containerName="registry-server" Jan 30 12:11:26 crc kubenswrapper[4703]: I0130 12:11:26.965067 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4" Jan 30 12:11:26 crc kubenswrapper[4703]: I0130 12:11:26.967623 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 30 12:11:26 crc kubenswrapper[4703]: I0130 12:11:26.977944 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4"] Jan 30 12:11:27 crc kubenswrapper[4703]: I0130 12:11:27.066405 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcl9g\" (UniqueName: \"kubernetes.io/projected/b14aa5da-c540-40d4-9f4c-8c00169e91b3-kube-api-access-mcl9g\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4\" (UID: \"b14aa5da-c540-40d4-9f4c-8c00169e91b3\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4" Jan 30 12:11:27 crc kubenswrapper[4703]: I0130 12:11:27.066861 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b14aa5da-c540-40d4-9f4c-8c00169e91b3-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4\" (UID: \"b14aa5da-c540-40d4-9f4c-8c00169e91b3\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4" Jan 30 12:11:27 crc kubenswrapper[4703]: I0130 12:11:27.066969 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b14aa5da-c540-40d4-9f4c-8c00169e91b3-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4\" (UID: \"b14aa5da-c540-40d4-9f4c-8c00169e91b3\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4" Jan 30 12:11:27 crc kubenswrapper[4703]: I0130 12:11:27.168920 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b14aa5da-c540-40d4-9f4c-8c00169e91b3-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4\" (UID: \"b14aa5da-c540-40d4-9f4c-8c00169e91b3\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4" Jan 30 12:11:27 crc kubenswrapper[4703]: I0130 12:11:27.168999 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcl9g\" (UniqueName: \"kubernetes.io/projected/b14aa5da-c540-40d4-9f4c-8c00169e91b3-kube-api-access-mcl9g\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4\" (UID: \"b14aa5da-c540-40d4-9f4c-8c00169e91b3\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4" Jan 30 12:11:27 crc kubenswrapper[4703]: I0130 12:11:27.169069 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b14aa5da-c540-40d4-9f4c-8c00169e91b3-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4\" (UID: \"b14aa5da-c540-40d4-9f4c-8c00169e91b3\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4" Jan 30 12:11:27 crc kubenswrapper[4703]: I0130 12:11:27.170047 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/b14aa5da-c540-40d4-9f4c-8c00169e91b3-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4\" (UID: \"b14aa5da-c540-40d4-9f4c-8c00169e91b3\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4" Jan 30 12:11:27 crc kubenswrapper[4703]: I0130 12:11:27.170373 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b14aa5da-c540-40d4-9f4c-8c00169e91b3-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4\" (UID: \"b14aa5da-c540-40d4-9f4c-8c00169e91b3\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4" Jan 30 12:11:27 crc kubenswrapper[4703]: I0130 12:11:27.192224 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcl9g\" (UniqueName: \"kubernetes.io/projected/b14aa5da-c540-40d4-9f4c-8c00169e91b3-kube-api-access-mcl9g\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4\" (UID: \"b14aa5da-c540-40d4-9f4c-8c00169e91b3\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4" Jan 30 12:11:27 crc kubenswrapper[4703]: I0130 12:11:27.327055 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4" Jan 30 12:11:27 crc kubenswrapper[4703]: I0130 12:11:27.826463 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4"] Jan 30 12:11:27 crc kubenswrapper[4703]: I0130 12:11:27.867597 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4" event={"ID":"b14aa5da-c540-40d4-9f4c-8c00169e91b3","Type":"ContainerStarted","Data":"0153af96a337c070e5cf0a0e393d2eccf62752cce8188cf72cc50beeb9a1ed6c"} Jan 30 12:11:29 crc kubenswrapper[4703]: I0130 12:11:29.886251 4703 generic.go:334] "Generic (PLEG): container finished" podID="b14aa5da-c540-40d4-9f4c-8c00169e91b3" containerID="c7d9657ef0645d12fa87407caa6195d43b3c383f1485f1cff22e88b5abfd1b7a" exitCode=0 Jan 30 12:11:29 crc kubenswrapper[4703]: I0130 12:11:29.886357 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4" event={"ID":"b14aa5da-c540-40d4-9f4c-8c00169e91b3","Type":"ContainerDied","Data":"c7d9657ef0645d12fa87407caa6195d43b3c383f1485f1cff22e88b5abfd1b7a"} Jan 30 12:11:31 crc kubenswrapper[4703]: I0130 12:11:31.902087 4703 generic.go:334] "Generic (PLEG): container finished" podID="b14aa5da-c540-40d4-9f4c-8c00169e91b3" containerID="57e3b7fd2214650003d4fbd018552318647f93501f16b205581196ef831b46e2" exitCode=0 Jan 30 12:11:31 crc kubenswrapper[4703]: I0130 12:11:31.902172 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4" event={"ID":"b14aa5da-c540-40d4-9f4c-8c00169e91b3","Type":"ContainerDied","Data":"57e3b7fd2214650003d4fbd018552318647f93501f16b205581196ef831b46e2"} Jan 30 12:11:32 crc kubenswrapper[4703]: I0130 12:11:32.913164 4703 generic.go:334] "Generic (PLEG): container finished" podID="b14aa5da-c540-40d4-9f4c-8c00169e91b3" containerID="6eb0022e4972ca8487c28af1cbff0753244e7551b7fc6b629bd43f867643372c" exitCode=0 Jan 30 12:11:32 crc kubenswrapper[4703]: I0130 
12:11:32.913671 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4" event={"ID":"b14aa5da-c540-40d4-9f4c-8c00169e91b3","Type":"ContainerDied","Data":"6eb0022e4972ca8487c28af1cbff0753244e7551b7fc6b629bd43f867643372c"} Jan 30 12:11:34 crc kubenswrapper[4703]: I0130 12:11:34.184205 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4" Jan 30 12:11:34 crc kubenswrapper[4703]: I0130 12:11:34.283976 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b14aa5da-c540-40d4-9f4c-8c00169e91b3-util\") pod \"b14aa5da-c540-40d4-9f4c-8c00169e91b3\" (UID: \"b14aa5da-c540-40d4-9f4c-8c00169e91b3\") " Jan 30 12:11:34 crc kubenswrapper[4703]: I0130 12:11:34.284132 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b14aa5da-c540-40d4-9f4c-8c00169e91b3-bundle\") pod \"b14aa5da-c540-40d4-9f4c-8c00169e91b3\" (UID: \"b14aa5da-c540-40d4-9f4c-8c00169e91b3\") " Jan 30 12:11:34 crc kubenswrapper[4703]: I0130 12:11:34.284259 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcl9g\" (UniqueName: \"kubernetes.io/projected/b14aa5da-c540-40d4-9f4c-8c00169e91b3-kube-api-access-mcl9g\") pod \"b14aa5da-c540-40d4-9f4c-8c00169e91b3\" (UID: \"b14aa5da-c540-40d4-9f4c-8c00169e91b3\") " Jan 30 12:11:34 crc kubenswrapper[4703]: I0130 12:11:34.284863 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b14aa5da-c540-40d4-9f4c-8c00169e91b3-bundle" (OuterVolumeSpecName: "bundle") pod "b14aa5da-c540-40d4-9f4c-8c00169e91b3" (UID: "b14aa5da-c540-40d4-9f4c-8c00169e91b3"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:11:34 crc kubenswrapper[4703]: I0130 12:11:34.291012 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b14aa5da-c540-40d4-9f4c-8c00169e91b3-kube-api-access-mcl9g" (OuterVolumeSpecName: "kube-api-access-mcl9g") pod "b14aa5da-c540-40d4-9f4c-8c00169e91b3" (UID: "b14aa5da-c540-40d4-9f4c-8c00169e91b3"). InnerVolumeSpecName "kube-api-access-mcl9g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:11:34 crc kubenswrapper[4703]: I0130 12:11:34.305105 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b14aa5da-c540-40d4-9f4c-8c00169e91b3-util" (OuterVolumeSpecName: "util") pod "b14aa5da-c540-40d4-9f4c-8c00169e91b3" (UID: "b14aa5da-c540-40d4-9f4c-8c00169e91b3"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:11:34 crc kubenswrapper[4703]: I0130 12:11:34.386024 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcl9g\" (UniqueName: \"kubernetes.io/projected/b14aa5da-c540-40d4-9f4c-8c00169e91b3-kube-api-access-mcl9g\") on node \"crc\" DevicePath \"\"" Jan 30 12:11:34 crc kubenswrapper[4703]: I0130 12:11:34.386064 4703 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/b14aa5da-c540-40d4-9f4c-8c00169e91b3-util\") on node \"crc\" DevicePath \"\"" Jan 30 12:11:34 crc kubenswrapper[4703]: I0130 12:11:34.386074 4703 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/b14aa5da-c540-40d4-9f4c-8c00169e91b3-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:11:34 crc kubenswrapper[4703]: I0130 12:11:34.931508 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4" event={"ID":"b14aa5da-c540-40d4-9f4c-8c00169e91b3","Type":"ContainerDied","Data":"0153af96a337c070e5cf0a0e393d2eccf62752cce8188cf72cc50beeb9a1ed6c"} Jan 30 12:11:34 crc kubenswrapper[4703]: I0130 12:11:34.931591 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0153af96a337c070e5cf0a0e393d2eccf62752cce8188cf72cc50beeb9a1ed6c" Jan 30 12:11:34 crc kubenswrapper[4703]: I0130 12:11:34.931629 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4" Jan 30 12:11:38 crc kubenswrapper[4703]: I0130 12:11:38.664822 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-gzrj2"] Jan 30 12:11:38 crc kubenswrapper[4703]: E0130 12:11:38.665220 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b14aa5da-c540-40d4-9f4c-8c00169e91b3" containerName="extract" Jan 30 12:11:38 crc kubenswrapper[4703]: I0130 12:11:38.665236 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="b14aa5da-c540-40d4-9f4c-8c00169e91b3" containerName="extract" Jan 30 12:11:38 crc kubenswrapper[4703]: E0130 12:11:38.665249 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b14aa5da-c540-40d4-9f4c-8c00169e91b3" containerName="util" Jan 30 12:11:38 crc kubenswrapper[4703]: I0130 12:11:38.665259 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="b14aa5da-c540-40d4-9f4c-8c00169e91b3" containerName="util" Jan 30 12:11:38 crc kubenswrapper[4703]: E0130 12:11:38.665275 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b14aa5da-c540-40d4-9f4c-8c00169e91b3" containerName="pull" Jan 30 12:11:38 crc kubenswrapper[4703]: I0130 12:11:38.665284 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="b14aa5da-c540-40d4-9f4c-8c00169e91b3" containerName="pull" Jan 30 12:11:38 crc kubenswrapper[4703]: I0130 12:11:38.665428 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="b14aa5da-c540-40d4-9f4c-8c00169e91b3" containerName="extract" Jan 30 12:11:38 crc kubenswrapper[4703]: I0130 12:11:38.666245 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-gzrj2" Jan 30 12:11:38 crc kubenswrapper[4703]: I0130 12:11:38.668992 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Jan 30 12:11:38 crc kubenswrapper[4703]: I0130 12:11:38.669069 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Jan 30 12:11:38 crc kubenswrapper[4703]: I0130 12:11:38.673501 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-h9xdh" Jan 30 12:11:38 crc kubenswrapper[4703]: I0130 12:11:38.683197 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-gzrj2"] Jan 30 12:11:38 crc kubenswrapper[4703]: I0130 12:11:38.754551 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fk5v7\" (UniqueName: \"kubernetes.io/projected/c26bacd3-3ee7-4276-b58d-a3213c1d8cd0-kube-api-access-fk5v7\") pod \"nmstate-operator-646758c888-gzrj2\" (UID: \"c26bacd3-3ee7-4276-b58d-a3213c1d8cd0\") " pod="openshift-nmstate/nmstate-operator-646758c888-gzrj2" Jan 30 12:11:38 crc kubenswrapper[4703]: I0130 12:11:38.856321 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fk5v7\" (UniqueName: \"kubernetes.io/projected/c26bacd3-3ee7-4276-b58d-a3213c1d8cd0-kube-api-access-fk5v7\") pod \"nmstate-operator-646758c888-gzrj2\" (UID: \"c26bacd3-3ee7-4276-b58d-a3213c1d8cd0\") " pod="openshift-nmstate/nmstate-operator-646758c888-gzrj2" Jan 30 12:11:38 crc kubenswrapper[4703]: I0130 12:11:38.907085 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fk5v7\" (UniqueName: \"kubernetes.io/projected/c26bacd3-3ee7-4276-b58d-a3213c1d8cd0-kube-api-access-fk5v7\") pod \"nmstate-operator-646758c888-gzrj2\" (UID: \"c26bacd3-3ee7-4276-b58d-a3213c1d8cd0\") " pod="openshift-nmstate/nmstate-operator-646758c888-gzrj2" Jan 30 12:11:38 crc kubenswrapper[4703]: I0130 12:11:38.988989 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-gzrj2" Jan 30 12:11:39 crc kubenswrapper[4703]: I0130 12:11:39.280058 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-gzrj2"] Jan 30 12:11:39 crc kubenswrapper[4703]: I0130 12:11:39.965916 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-gzrj2" event={"ID":"c26bacd3-3ee7-4276-b58d-a3213c1d8cd0","Type":"ContainerStarted","Data":"b7ecb11794b616354f0074498ccc3be1951a463f4e0fe3431300fdca536070ef"} Jan 30 12:11:42 crc kubenswrapper[4703]: I0130 12:11:42.988064 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-gzrj2" event={"ID":"c26bacd3-3ee7-4276-b58d-a3213c1d8cd0","Type":"ContainerStarted","Data":"64d03ab30196a670de1513c0dcabb88684724476f5f9ca95259b595fa9c725d2"} Jan 30 12:11:43 crc kubenswrapper[4703]: I0130 12:11:43.008987 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-gzrj2" podStartSLOduration=2.233971773 podStartE2EDuration="5.008946612s" podCreationTimestamp="2026-01-30 12:11:38 +0000 UTC" firstStartedPulling="2026-01-30 12:11:39.289107801 +0000 UTC m=+935.066929455" lastFinishedPulling="2026-01-30 12:11:42.06408264 +0000 UTC m=+937.841904294" observedRunningTime="2026-01-30 12:11:43.006550918 +0000 UTC m=+938.784372592" watchObservedRunningTime="2026-01-30 12:11:43.008946612 +0000 UTC m=+938.786768266" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.104259 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-rldth"] Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.105705 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-rldth" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.109349 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-5xst6" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.121166 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-rldth"] Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.155209 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-jndjq"] Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.156488 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jndjq" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.162245 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.181657 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-8ltjk"] Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.182593 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-8ltjk" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.188529 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-jndjq"] Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.238151 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdwn5\" (UniqueName: \"kubernetes.io/projected/f2c5f325-b450-4ffb-b23e-fb7322f9aad0-kube-api-access-fdwn5\") pod \"nmstate-metrics-54757c584b-rldth\" (UID: \"f2c5f325-b450-4ffb-b23e-fb7322f9aad0\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-rldth" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.302905 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-4qn59"] Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.304155 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-4qn59" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.310809 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-4qn59"] Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.311053 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-x87b2" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.311057 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.311616 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.339610 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdwn5\" (UniqueName: \"kubernetes.io/projected/f2c5f325-b450-4ffb-b23e-fb7322f9aad0-kube-api-access-fdwn5\") pod \"nmstate-metrics-54757c584b-rldth\" (UID: \"f2c5f325-b450-4ffb-b23e-fb7322f9aad0\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-rldth" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.339689 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzfw2\" (UniqueName: \"kubernetes.io/projected/df161a1d-9b81-4b35-82b8-2efb65b490d4-kube-api-access-dzfw2\") pod \"nmstate-handler-8ltjk\" (UID: \"df161a1d-9b81-4b35-82b8-2efb65b490d4\") " pod="openshift-nmstate/nmstate-handler-8ltjk" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.339734 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/df161a1d-9b81-4b35-82b8-2efb65b490d4-dbus-socket\") pod \"nmstate-handler-8ltjk\" (UID: \"df161a1d-9b81-4b35-82b8-2efb65b490d4\") " pod="openshift-nmstate/nmstate-handler-8ltjk" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.339788 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qft8\" (UniqueName: \"kubernetes.io/projected/affa004f-4f06-4a2c-9e91-3ac54a7e5a4f-kube-api-access-7qft8\") pod \"nmstate-webhook-8474b5b9d8-jndjq\" (UID: \"affa004f-4f06-4a2c-9e91-3ac54a7e5a4f\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jndjq" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.339820 4703 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/affa004f-4f06-4a2c-9e91-3ac54a7e5a4f-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-jndjq\" (UID: \"affa004f-4f06-4a2c-9e91-3ac54a7e5a4f\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jndjq" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.339855 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/df161a1d-9b81-4b35-82b8-2efb65b490d4-ovs-socket\") pod \"nmstate-handler-8ltjk\" (UID: \"df161a1d-9b81-4b35-82b8-2efb65b490d4\") " pod="openshift-nmstate/nmstate-handler-8ltjk" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.339950 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/df161a1d-9b81-4b35-82b8-2efb65b490d4-nmstate-lock\") pod \"nmstate-handler-8ltjk\" (UID: \"df161a1d-9b81-4b35-82b8-2efb65b490d4\") " pod="openshift-nmstate/nmstate-handler-8ltjk" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.380733 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdwn5\" (UniqueName: \"kubernetes.io/projected/f2c5f325-b450-4ffb-b23e-fb7322f9aad0-kube-api-access-fdwn5\") pod \"nmstate-metrics-54757c584b-rldth\" (UID: \"f2c5f325-b450-4ffb-b23e-fb7322f9aad0\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-rldth" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.440914 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qft8\" (UniqueName: \"kubernetes.io/projected/affa004f-4f06-4a2c-9e91-3ac54a7e5a4f-kube-api-access-7qft8\") pod \"nmstate-webhook-8474b5b9d8-jndjq\" (UID: \"affa004f-4f06-4a2c-9e91-3ac54a7e5a4f\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jndjq" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.440982 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwxbj\" (UniqueName: \"kubernetes.io/projected/ca06ad3d-27fa-4486-a6a6-2eeed1619033-kube-api-access-hwxbj\") pod \"nmstate-console-plugin-7754f76f8b-4qn59\" (UID: \"ca06ad3d-27fa-4486-a6a6-2eeed1619033\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-4qn59" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.441018 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/affa004f-4f06-4a2c-9e91-3ac54a7e5a4f-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-jndjq\" (UID: \"affa004f-4f06-4a2c-9e91-3ac54a7e5a4f\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jndjq" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.441050 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/df161a1d-9b81-4b35-82b8-2efb65b490d4-ovs-socket\") pod \"nmstate-handler-8ltjk\" (UID: \"df161a1d-9b81-4b35-82b8-2efb65b490d4\") " pod="openshift-nmstate/nmstate-handler-8ltjk" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.441140 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/df161a1d-9b81-4b35-82b8-2efb65b490d4-ovs-socket\") pod \"nmstate-handler-8ltjk\" (UID: \"df161a1d-9b81-4b35-82b8-2efb65b490d4\") " pod="openshift-nmstate/nmstate-handler-8ltjk" Jan 30 12:11:48 
crc kubenswrapper[4703]: I0130 12:11:48.441152 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/ca06ad3d-27fa-4486-a6a6-2eeed1619033-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-4qn59\" (UID: \"ca06ad3d-27fa-4486-a6a6-2eeed1619033\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-4qn59" Jan 30 12:11:48 crc kubenswrapper[4703]: E0130 12:11:48.441169 4703 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.441202 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/df161a1d-9b81-4b35-82b8-2efb65b490d4-nmstate-lock\") pod \"nmstate-handler-8ltjk\" (UID: \"df161a1d-9b81-4b35-82b8-2efb65b490d4\") " pod="openshift-nmstate/nmstate-handler-8ltjk" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.441178 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/df161a1d-9b81-4b35-82b8-2efb65b490d4-nmstate-lock\") pod \"nmstate-handler-8ltjk\" (UID: \"df161a1d-9b81-4b35-82b8-2efb65b490d4\") " pod="openshift-nmstate/nmstate-handler-8ltjk" Jan 30 12:11:48 crc kubenswrapper[4703]: E0130 12:11:48.441238 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/affa004f-4f06-4a2c-9e91-3ac54a7e5a4f-tls-key-pair podName:affa004f-4f06-4a2c-9e91-3ac54a7e5a4f nodeName:}" failed. No retries permitted until 2026-01-30 12:11:48.941212029 +0000 UTC m=+944.719033683 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/affa004f-4f06-4a2c-9e91-3ac54a7e5a4f-tls-key-pair") pod "nmstate-webhook-8474b5b9d8-jndjq" (UID: "affa004f-4f06-4a2c-9e91-3ac54a7e5a4f") : secret "openshift-nmstate-webhook" not found Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.441290 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzfw2\" (UniqueName: \"kubernetes.io/projected/df161a1d-9b81-4b35-82b8-2efb65b490d4-kube-api-access-dzfw2\") pod \"nmstate-handler-8ltjk\" (UID: \"df161a1d-9b81-4b35-82b8-2efb65b490d4\") " pod="openshift-nmstate/nmstate-handler-8ltjk" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.441320 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/ca06ad3d-27fa-4486-a6a6-2eeed1619033-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-4qn59\" (UID: \"ca06ad3d-27fa-4486-a6a6-2eeed1619033\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-4qn59" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.441344 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/df161a1d-9b81-4b35-82b8-2efb65b490d4-dbus-socket\") pod \"nmstate-handler-8ltjk\" (UID: \"df161a1d-9b81-4b35-82b8-2efb65b490d4\") " pod="openshift-nmstate/nmstate-handler-8ltjk" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.441789 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/df161a1d-9b81-4b35-82b8-2efb65b490d4-dbus-socket\") pod \"nmstate-handler-8ltjk\" (UID: \"df161a1d-9b81-4b35-82b8-2efb65b490d4\") " 
pod="openshift-nmstate/nmstate-handler-8ltjk" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.445991 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-rldth" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.464790 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qft8\" (UniqueName: \"kubernetes.io/projected/affa004f-4f06-4a2c-9e91-3ac54a7e5a4f-kube-api-access-7qft8\") pod \"nmstate-webhook-8474b5b9d8-jndjq\" (UID: \"affa004f-4f06-4a2c-9e91-3ac54a7e5a4f\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jndjq" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.465775 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzfw2\" (UniqueName: \"kubernetes.io/projected/df161a1d-9b81-4b35-82b8-2efb65b490d4-kube-api-access-dzfw2\") pod \"nmstate-handler-8ltjk\" (UID: \"df161a1d-9b81-4b35-82b8-2efb65b490d4\") " pod="openshift-nmstate/nmstate-handler-8ltjk" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.516249 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-8ltjk" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.542485 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwxbj\" (UniqueName: \"kubernetes.io/projected/ca06ad3d-27fa-4486-a6a6-2eeed1619033-kube-api-access-hwxbj\") pod \"nmstate-console-plugin-7754f76f8b-4qn59\" (UID: \"ca06ad3d-27fa-4486-a6a6-2eeed1619033\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-4qn59" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.542591 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/ca06ad3d-27fa-4486-a6a6-2eeed1619033-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-4qn59\" (UID: \"ca06ad3d-27fa-4486-a6a6-2eeed1619033\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-4qn59" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.542637 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/ca06ad3d-27fa-4486-a6a6-2eeed1619033-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-4qn59\" (UID: \"ca06ad3d-27fa-4486-a6a6-2eeed1619033\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-4qn59" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.542590 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-8ddf6489b-c5h7r"] Jan 30 12:11:48 crc kubenswrapper[4703]: E0130 12:11:48.542946 4703 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Jan 30 12:11:48 crc kubenswrapper[4703]: E0130 12:11:48.543066 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ca06ad3d-27fa-4486-a6a6-2eeed1619033-plugin-serving-cert podName:ca06ad3d-27fa-4486-a6a6-2eeed1619033 nodeName:}" failed. No retries permitted until 2026-01-30 12:11:49.043036734 +0000 UTC m=+944.820858388 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/ca06ad3d-27fa-4486-a6a6-2eeed1619033-plugin-serving-cert") pod "nmstate-console-plugin-7754f76f8b-4qn59" (UID: "ca06ad3d-27fa-4486-a6a6-2eeed1619033") : secret "plugin-serving-cert" not found Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.543983 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/ca06ad3d-27fa-4486-a6a6-2eeed1619033-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-4qn59\" (UID: \"ca06ad3d-27fa-4486-a6a6-2eeed1619033\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-4qn59" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.543985 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8ddf6489b-c5h7r" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.562724 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-8ddf6489b-c5h7r"] Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.575463 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwxbj\" (UniqueName: \"kubernetes.io/projected/ca06ad3d-27fa-4486-a6a6-2eeed1619033-kube-api-access-hwxbj\") pod \"nmstate-console-plugin-7754f76f8b-4qn59\" (UID: \"ca06ad3d-27fa-4486-a6a6-2eeed1619033\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-4qn59" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.644711 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-oauth-serving-cert\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.644783 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-console-oauth-config\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.644826 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrn7r\" (UniqueName: \"kubernetes.io/projected/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-kube-api-access-zrn7r\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.644872 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-console-config\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.644916 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-service-ca\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r" Jan 30 12:11:48 crc 
kubenswrapper[4703]: I0130 12:11:48.645006 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-console-serving-cert\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.645052 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-trusted-ca-bundle\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.747347 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-trusted-ca-bundle\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.747491 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-oauth-serving-cert\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.747529 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrn7r\" (UniqueName: \"kubernetes.io/projected/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-kube-api-access-zrn7r\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.747569 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-console-oauth-config\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.747600 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-console-config\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.747624 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-service-ca\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r" Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.747679 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-console-serving-cert\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r" 
Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.750475 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-trusted-ca-bundle\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r"
Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.751285 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-console-config\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r"
Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.751958 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-service-ca\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r"
Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.752327 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-console-serving-cert\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r"
Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.752548 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-console-oauth-config\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r"
Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.752878 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-oauth-serving-cert\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r"
Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.787226 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrn7r\" (UniqueName: \"kubernetes.io/projected/54403e63-53b4-4b87-8aa7-c4ea10d0bbfd-kube-api-access-zrn7r\") pod \"console-8ddf6489b-c5h7r\" (UID: \"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd\") " pod="openshift-console/console-8ddf6489b-c5h7r"
Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.869747 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-8ddf6489b-c5h7r"
Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.952592 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/affa004f-4f06-4a2c-9e91-3ac54a7e5a4f-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-jndjq\" (UID: \"affa004f-4f06-4a2c-9e91-3ac54a7e5a4f\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jndjq"
Jan 30 12:11:48 crc kubenswrapper[4703]: I0130 12:11:48.958964 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/affa004f-4f06-4a2c-9e91-3ac54a7e5a4f-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-jndjq\" (UID: \"affa004f-4f06-4a2c-9e91-3ac54a7e5a4f\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jndjq"
Jan 30 12:11:49 crc kubenswrapper[4703]: I0130 12:11:49.059566 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/ca06ad3d-27fa-4486-a6a6-2eeed1619033-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-4qn59\" (UID: \"ca06ad3d-27fa-4486-a6a6-2eeed1619033\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-4qn59"
Jan 30 12:11:49 crc kubenswrapper[4703]: I0130 12:11:49.063577 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/ca06ad3d-27fa-4486-a6a6-2eeed1619033-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-4qn59\" (UID: \"ca06ad3d-27fa-4486-a6a6-2eeed1619033\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-4qn59"
Jan 30 12:11:49 crc kubenswrapper[4703]: I0130 12:11:49.070904 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-8ltjk" event={"ID":"df161a1d-9b81-4b35-82b8-2efb65b490d4","Type":"ContainerStarted","Data":"4c49aafcc47c8457ad7d4bf63c7618ab662d2d3338d4f5487f0c296c9be4ebd7"}
Jan 30 12:11:49 crc kubenswrapper[4703]: I0130 12:11:49.091458 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jndjq"
Jan 30 12:11:49 crc kubenswrapper[4703]: I0130 12:11:49.123475 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-rldth"]
Jan 30 12:11:49 crc kubenswrapper[4703]: I0130 12:11:49.234138 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-8ddf6489b-c5h7r"]
Jan 30 12:11:49 crc kubenswrapper[4703]: I0130 12:11:49.234616 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-4qn59"
Jan 30 12:11:49 crc kubenswrapper[4703]: I0130 12:11:49.385138 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-jndjq"]
Jan 30 12:11:49 crc kubenswrapper[4703]: I0130 12:11:49.516229 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-4qn59"]
Jan 30 12:11:49 crc kubenswrapper[4703]: W0130 12:11:49.533407 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podca06ad3d_27fa_4486_a6a6_2eeed1619033.slice/crio-22fbceeea323daff3f54a3d41d1c0b7191df1d6d91c2495d9c983a641cccdbd9 WatchSource:0}: Error finding container 22fbceeea323daff3f54a3d41d1c0b7191df1d6d91c2495d9c983a641cccdbd9: Status 404 returned error can't find the container with id 22fbceeea323daff3f54a3d41d1c0b7191df1d6d91c2495d9c983a641cccdbd9
Jan 30 12:11:50 crc kubenswrapper[4703]: I0130 12:11:50.081833 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-rldth" event={"ID":"f2c5f325-b450-4ffb-b23e-fb7322f9aad0","Type":"ContainerStarted","Data":"33ad94a4e69bdfea1b0f7bbeda9b865127e972fa89154cc8a1a3b23e430812b5"}
Jan 30 12:11:50 crc kubenswrapper[4703]: I0130 12:11:50.083612 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jndjq" event={"ID":"affa004f-4f06-4a2c-9e91-3ac54a7e5a4f","Type":"ContainerStarted","Data":"7d698a0f739ddfe25eb88e2f27dc433ea73227be42f78b8e86327385dcc28bae"}
Jan 30 12:11:50 crc kubenswrapper[4703]: I0130 12:11:50.085141 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-8ddf6489b-c5h7r" event={"ID":"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd","Type":"ContainerStarted","Data":"1114d4ed0b185edc1892dd673ff28426480d1185db5cb53d390f135c4ea6e538"}
Jan 30 12:11:50 crc kubenswrapper[4703]: I0130 12:11:50.085168 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-8ddf6489b-c5h7r" event={"ID":"54403e63-53b4-4b87-8aa7-c4ea10d0bbfd","Type":"ContainerStarted","Data":"8050f60ecdf0f88a496c39e1ea8fa0aac3858300aa3ea3600def37c944922e94"}
Jan 30 12:11:50 crc kubenswrapper[4703]: I0130 12:11:50.086681 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-4qn59" event={"ID":"ca06ad3d-27fa-4486-a6a6-2eeed1619033","Type":"ContainerStarted","Data":"22fbceeea323daff3f54a3d41d1c0b7191df1d6d91c2495d9c983a641cccdbd9"}
Jan 30 12:11:50 crc kubenswrapper[4703]: I0130 12:11:50.111858 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-8ddf6489b-c5h7r" podStartSLOduration=2.111830868 podStartE2EDuration="2.111830868s" podCreationTimestamp="2026-01-30 12:11:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:11:50.104695856 +0000 UTC m=+945.882517530" watchObservedRunningTime="2026-01-30 12:11:50.111830868 +0000 UTC m=+945.889652522"
Jan 30 12:11:54 crc kubenswrapper[4703]: I0130 12:11:54.209819 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-8ltjk" event={"ID":"df161a1d-9b81-4b35-82b8-2efb65b490d4","Type":"ContainerStarted","Data":"d9ea4ebf3bd8be86ee52af0d39c5d9dbd229707f670bf7fed8376211944e02a9"}
Jan 30 12:11:54 crc kubenswrapper[4703]: I0130 12:11:54.210741 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-8ltjk"
Jan 30 12:11:54 crc kubenswrapper[4703]: I0130 12:11:54.211603 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-4qn59" event={"ID":"ca06ad3d-27fa-4486-a6a6-2eeed1619033","Type":"ContainerStarted","Data":"83b8163b0d8412fda29c885050d56a644db9d51c5d91d42cb8716c16a2e9b1ba"}
Jan 30 12:11:54 crc kubenswrapper[4703]: I0130 12:11:54.214086 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-rldth" event={"ID":"f2c5f325-b450-4ffb-b23e-fb7322f9aad0","Type":"ContainerStarted","Data":"cb505493f0b02271a1cac2cd6d27a567232522f17b066498f386167f8326181b"}
Jan 30 12:11:54 crc kubenswrapper[4703]: I0130 12:11:54.215442 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jndjq" event={"ID":"affa004f-4f06-4a2c-9e91-3ac54a7e5a4f","Type":"ContainerStarted","Data":"6962b93be9ba54558744e50a0e69994c460a180781c07a1130b8710992d51a95"}
Jan 30 12:11:54 crc kubenswrapper[4703]: I0130 12:11:54.215625 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jndjq"
Jan 30 12:11:54 crc kubenswrapper[4703]: I0130 12:11:54.234359 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-8ltjk" podStartSLOduration=1.749267259 podStartE2EDuration="6.234327861s" podCreationTimestamp="2026-01-30 12:11:48 +0000 UTC" firstStartedPulling="2026-01-30 12:11:48.66499911 +0000 UTC m=+944.442820764" lastFinishedPulling="2026-01-30 12:11:53.150059692 +0000 UTC m=+948.927881366" observedRunningTime="2026-01-30 12:11:54.22798745 +0000 UTC m=+950.005809094" watchObservedRunningTime="2026-01-30 12:11:54.234327861 +0000 UTC m=+950.012149515"
Jan 30 12:11:54 crc kubenswrapper[4703]: I0130 12:11:54.243494 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-4qn59" podStartSLOduration=2.628399619 podStartE2EDuration="6.243395665s" podCreationTimestamp="2026-01-30 12:11:48 +0000 UTC" firstStartedPulling="2026-01-30 12:11:49.53523189 +0000 UTC m=+945.313053544" lastFinishedPulling="2026-01-30 12:11:53.150227936 +0000 UTC m=+948.928049590" observedRunningTime="2026-01-30 12:11:54.241205106 +0000 UTC m=+950.019026770" watchObservedRunningTime="2026-01-30 12:11:54.243395665 +0000 UTC m=+950.021217319"
Jan 30 12:11:55 crc kubenswrapper[4703]: I0130 12:11:55.134276 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jndjq" podStartSLOduration=3.381927745 podStartE2EDuration="7.134246841s" podCreationTimestamp="2026-01-30 12:11:48 +0000 UTC" firstStartedPulling="2026-01-30 12:11:49.398481686 +0000 UTC m=+945.176303350" lastFinishedPulling="2026-01-30 12:11:53.150800792 +0000 UTC m=+948.928622446" observedRunningTime="2026-01-30 12:11:54.263421984 +0000 UTC m=+950.041243638" watchObservedRunningTime="2026-01-30 12:11:55.134246841 +0000 UTC m=+950.912068495"
Jan 30 12:11:57 crc kubenswrapper[4703]: I0130 12:11:57.246092 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-rldth" event={"ID":"f2c5f325-b450-4ffb-b23e-fb7322f9aad0","Type":"ContainerStarted","Data":"b06c97d60653f9c000c8c69296054af8d4128986382f12b9487bfb319f8774e6"}
Jan 30 12:11:57 crc kubenswrapper[4703]: I0130 12:11:57.274146 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-rldth" podStartSLOduration=1.929453735 podStartE2EDuration="9.274108086s" podCreationTimestamp="2026-01-30 12:11:48 +0000 UTC" firstStartedPulling="2026-01-30 12:11:49.122347395 +0000 UTC m=+944.900169049" lastFinishedPulling="2026-01-30 12:11:56.467001756 +0000 UTC m=+952.244823400" observedRunningTime="2026-01-30 12:11:57.271052433 +0000 UTC m=+953.048874107" watchObservedRunningTime="2026-01-30 12:11:57.274108086 +0000 UTC m=+953.051929740"
Jan 30 12:11:58 crc kubenswrapper[4703]: I0130 12:11:58.541502 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-8ltjk"
Jan 30 12:11:58 crc kubenswrapper[4703]: I0130 12:11:58.870532 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-8ddf6489b-c5h7r"
Jan 30 12:11:58 crc kubenswrapper[4703]: I0130 12:11:58.871418 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-8ddf6489b-c5h7r"
Jan 30 12:11:58 crc kubenswrapper[4703]: I0130 12:11:58.875641 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-8ddf6489b-c5h7r"
Jan 30 12:11:59 crc kubenswrapper[4703]: I0130 12:11:59.267477 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-8ddf6489b-c5h7r"
Jan 30 12:11:59 crc kubenswrapper[4703]: I0130 12:11:59.330465 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-mxjx6"]
Jan 30 12:12:09 crc kubenswrapper[4703]: I0130 12:12:09.097960 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-jndjq"
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.379762 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-mxjx6" podUID="aa4aaaa5-396e-4e62-92a3-74b835af58a7" containerName="console" containerID="cri-o://935eb1d863568ff69429b034680d848c693ddc18879d2ee1a12e82cdb2781307" gracePeriod=15
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.517409 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-mxjx6_aa4aaaa5-396e-4e62-92a3-74b835af58a7/console/0.log"
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.517479 4703 generic.go:334] "Generic (PLEG): container finished" podID="aa4aaaa5-396e-4e62-92a3-74b835af58a7" containerID="935eb1d863568ff69429b034680d848c693ddc18879d2ee1a12e82cdb2781307" exitCode=2
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.517531 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-mxjx6" event={"ID":"aa4aaaa5-396e-4e62-92a3-74b835af58a7","Type":"ContainerDied","Data":"935eb1d863568ff69429b034680d848c693ddc18879d2ee1a12e82cdb2781307"}
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.824356 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-mxjx6_aa4aaaa5-396e-4e62-92a3-74b835af58a7/console/0.log"
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.824455 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-mxjx6"
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.948068 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-oauth-serving-cert\") pod \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") "
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.948278 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/aa4aaaa5-396e-4e62-92a3-74b835af58a7-console-oauth-config\") pod \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") "
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.948346 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-service-ca\") pod \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") "
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.948376 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-console-config\") pod \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") "
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.948421 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa4aaaa5-396e-4e62-92a3-74b835af58a7-console-serving-cert\") pod \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") "
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.948512 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zzvln\" (UniqueName: \"kubernetes.io/projected/aa4aaaa5-396e-4e62-92a3-74b835af58a7-kube-api-access-zzvln\") pod \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") "
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.948545 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-trusted-ca-bundle\") pod \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\" (UID: \"aa4aaaa5-396e-4e62-92a3-74b835af58a7\") "
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.949387 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "aa4aaaa5-396e-4e62-92a3-74b835af58a7" (UID: "aa4aaaa5-396e-4e62-92a3-74b835af58a7"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.949401 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-console-config" (OuterVolumeSpecName: "console-config") pod "aa4aaaa5-396e-4e62-92a3-74b835af58a7" (UID: "aa4aaaa5-396e-4e62-92a3-74b835af58a7"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.949538 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-service-ca" (OuterVolumeSpecName: "service-ca") pod "aa4aaaa5-396e-4e62-92a3-74b835af58a7" (UID: "aa4aaaa5-396e-4e62-92a3-74b835af58a7"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.949633 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "aa4aaaa5-396e-4e62-92a3-74b835af58a7" (UID: "aa4aaaa5-396e-4e62-92a3-74b835af58a7"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.956973 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa4aaaa5-396e-4e62-92a3-74b835af58a7-kube-api-access-zzvln" (OuterVolumeSpecName: "kube-api-access-zzvln") pod "aa4aaaa5-396e-4e62-92a3-74b835af58a7" (UID: "aa4aaaa5-396e-4e62-92a3-74b835af58a7"). InnerVolumeSpecName "kube-api-access-zzvln". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.957179 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa4aaaa5-396e-4e62-92a3-74b835af58a7-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "aa4aaaa5-396e-4e62-92a3-74b835af58a7" (UID: "aa4aaaa5-396e-4e62-92a3-74b835af58a7"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:12:24 crc kubenswrapper[4703]: I0130 12:12:24.965394 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa4aaaa5-396e-4e62-92a3-74b835af58a7-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "aa4aaaa5-396e-4e62-92a3-74b835af58a7" (UID: "aa4aaaa5-396e-4e62-92a3-74b835af58a7"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.049747 4703 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-service-ca\") on node \"crc\" DevicePath \"\""
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.049807 4703 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-console-config\") on node \"crc\" DevicePath \"\""
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.049824 4703 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/aa4aaaa5-396e-4e62-92a3-74b835af58a7-console-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.049840 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zzvln\" (UniqueName: \"kubernetes.io/projected/aa4aaaa5-396e-4e62-92a3-74b835af58a7-kube-api-access-zzvln\") on node \"crc\" DevicePath \"\""
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.049849 4703 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.049858 4703 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/aa4aaaa5-396e-4e62-92a3-74b835af58a7-oauth-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.049867 4703 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/aa4aaaa5-396e-4e62-92a3-74b835af58a7-console-oauth-config\") on node \"crc\" DevicePath \"\""
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.346070 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs"]
Jan 30 12:12:25 crc kubenswrapper[4703]: E0130 12:12:25.346871 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa4aaaa5-396e-4e62-92a3-74b835af58a7" containerName="console"
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.346887 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa4aaaa5-396e-4e62-92a3-74b835af58a7" containerName="console"
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.347004 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa4aaaa5-396e-4e62-92a3-74b835af58a7" containerName="console"
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.348029 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs"
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.351936 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.354270 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hm6n8\" (UniqueName: \"kubernetes.io/projected/96adb521-689d-4159-8cc3-73d1e75c182e-kube-api-access-hm6n8\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs\" (UID: \"96adb521-689d-4159-8cc3-73d1e75c182e\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs"
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.354404 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/96adb521-689d-4159-8cc3-73d1e75c182e-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs\" (UID: \"96adb521-689d-4159-8cc3-73d1e75c182e\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs"
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.354449 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/96adb521-689d-4159-8cc3-73d1e75c182e-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs\" (UID: \"96adb521-689d-4159-8cc3-73d1e75c182e\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs"
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.361342 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs"]
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.455803 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hm6n8\" (UniqueName: \"kubernetes.io/projected/96adb521-689d-4159-8cc3-73d1e75c182e-kube-api-access-hm6n8\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs\" (UID: \"96adb521-689d-4159-8cc3-73d1e75c182e\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs"
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.455869 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/96adb521-689d-4159-8cc3-73d1e75c182e-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs\" (UID: \"96adb521-689d-4159-8cc3-73d1e75c182e\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs"
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.455892 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/96adb521-689d-4159-8cc3-73d1e75c182e-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs\" (UID: \"96adb521-689d-4159-8cc3-73d1e75c182e\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs"
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.456695 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/96adb521-689d-4159-8cc3-73d1e75c182e-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs\" (UID: \"96adb521-689d-4159-8cc3-73d1e75c182e\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs"
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.456718 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/96adb521-689d-4159-8cc3-73d1e75c182e-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs\" (UID: \"96adb521-689d-4159-8cc3-73d1e75c182e\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs"
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.474014 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hm6n8\" (UniqueName: \"kubernetes.io/projected/96adb521-689d-4159-8cc3-73d1e75c182e-kube-api-access-hm6n8\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs\" (UID: \"96adb521-689d-4159-8cc3-73d1e75c182e\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs"
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.527222 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-mxjx6_aa4aaaa5-396e-4e62-92a3-74b835af58a7/console/0.log"
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.527308 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-mxjx6" event={"ID":"aa4aaaa5-396e-4e62-92a3-74b835af58a7","Type":"ContainerDied","Data":"70dde461d8ef0383724a92d817faba63e7464a6eeb43fe4193addee302ed5001"}
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.527356 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-mxjx6"
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.527373 4703 scope.go:117] "RemoveContainer" containerID="935eb1d863568ff69429b034680d848c693ddc18879d2ee1a12e82cdb2781307"
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.559715 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-mxjx6"]
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.568710 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-mxjx6"]
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.664515 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs"
Jan 30 12:12:25 crc kubenswrapper[4703]: I0130 12:12:25.875655 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs"]
Jan 30 12:12:26 crc kubenswrapper[4703]: I0130 12:12:26.534997 4703 generic.go:334] "Generic (PLEG): container finished" podID="96adb521-689d-4159-8cc3-73d1e75c182e" containerID="ee67ac1b09642d94c89310d0cc92710b1385e7d013b4177aa50fdeb59a3d77da" exitCode=0
Jan 30 12:12:26 crc kubenswrapper[4703]: I0130 12:12:26.535078 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs" event={"ID":"96adb521-689d-4159-8cc3-73d1e75c182e","Type":"ContainerDied","Data":"ee67ac1b09642d94c89310d0cc92710b1385e7d013b4177aa50fdeb59a3d77da"}
Jan 30 12:12:26 crc kubenswrapper[4703]: I0130 12:12:26.535149 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs" event={"ID":"96adb521-689d-4159-8cc3-73d1e75c182e","Type":"ContainerStarted","Data":"07aff12c55d9ad94692034a90dc6ca79f7ad3565c3d10fb53e9d625a1270a7de"}
Jan 30 12:12:27 crc kubenswrapper[4703]: I0130 12:12:27.099341 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa4aaaa5-396e-4e62-92a3-74b835af58a7" path="/var/lib/kubelet/pods/aa4aaaa5-396e-4e62-92a3-74b835af58a7/volumes"
Jan 30 12:12:28 crc kubenswrapper[4703]: I0130 12:12:28.554020 4703 generic.go:334] "Generic (PLEG): container finished" podID="96adb521-689d-4159-8cc3-73d1e75c182e" containerID="93d97c2081c30de438139df84fc462b11c8294d84ab35bcf630963a1e4b3d4e4" exitCode=0
Jan 30 12:12:28 crc kubenswrapper[4703]: I0130 12:12:28.554155 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs" event={"ID":"96adb521-689d-4159-8cc3-73d1e75c182e","Type":"ContainerDied","Data":"93d97c2081c30de438139df84fc462b11c8294d84ab35bcf630963a1e4b3d4e4"}
Jan 30 12:12:29 crc kubenswrapper[4703]: I0130 12:12:29.567458 4703 generic.go:334] "Generic (PLEG): container finished" podID="96adb521-689d-4159-8cc3-73d1e75c182e" containerID="d8a9e9d1c572112fb6d1f31082c2b8764416aa3107b9bc9c812cef9f7e7ac6c2" exitCode=0
Jan 30 12:12:29 crc kubenswrapper[4703]: I0130 12:12:29.567557 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs" event={"ID":"96adb521-689d-4159-8cc3-73d1e75c182e","Type":"ContainerDied","Data":"d8a9e9d1c572112fb6d1f31082c2b8764416aa3107b9bc9c812cef9f7e7ac6c2"}
Jan 30 12:12:30 crc kubenswrapper[4703]: I0130 12:12:30.830565 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs"
Jan 30 12:12:30 crc kubenswrapper[4703]: I0130 12:12:30.944069 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/96adb521-689d-4159-8cc3-73d1e75c182e-bundle\") pod \"96adb521-689d-4159-8cc3-73d1e75c182e\" (UID: \"96adb521-689d-4159-8cc3-73d1e75c182e\") "
Jan 30 12:12:30 crc kubenswrapper[4703]: I0130 12:12:30.944170 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hm6n8\" (UniqueName: \"kubernetes.io/projected/96adb521-689d-4159-8cc3-73d1e75c182e-kube-api-access-hm6n8\") pod \"96adb521-689d-4159-8cc3-73d1e75c182e\" (UID: \"96adb521-689d-4159-8cc3-73d1e75c182e\") "
Jan 30 12:12:30 crc kubenswrapper[4703]: I0130 12:12:30.944214 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/96adb521-689d-4159-8cc3-73d1e75c182e-util\") pod \"96adb521-689d-4159-8cc3-73d1e75c182e\" (UID: \"96adb521-689d-4159-8cc3-73d1e75c182e\") "
Jan 30 12:12:30 crc kubenswrapper[4703]: I0130 12:12:30.946053 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/96adb521-689d-4159-8cc3-73d1e75c182e-bundle" (OuterVolumeSpecName: "bundle") pod "96adb521-689d-4159-8cc3-73d1e75c182e" (UID: "96adb521-689d-4159-8cc3-73d1e75c182e"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 30 12:12:30 crc kubenswrapper[4703]: I0130 12:12:30.954439 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96adb521-689d-4159-8cc3-73d1e75c182e-kube-api-access-hm6n8" (OuterVolumeSpecName: "kube-api-access-hm6n8") pod "96adb521-689d-4159-8cc3-73d1e75c182e" (UID: "96adb521-689d-4159-8cc3-73d1e75c182e"). InnerVolumeSpecName "kube-api-access-hm6n8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:12:30 crc kubenswrapper[4703]: I0130 12:12:30.961794 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/96adb521-689d-4159-8cc3-73d1e75c182e-util" (OuterVolumeSpecName: "util") pod "96adb521-689d-4159-8cc3-73d1e75c182e" (UID: "96adb521-689d-4159-8cc3-73d1e75c182e"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 30 12:12:31 crc kubenswrapper[4703]: I0130 12:12:31.045692 4703 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/96adb521-689d-4159-8cc3-73d1e75c182e-bundle\") on node \"crc\" DevicePath \"\""
Jan 30 12:12:31 crc kubenswrapper[4703]: I0130 12:12:31.045730 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hm6n8\" (UniqueName: \"kubernetes.io/projected/96adb521-689d-4159-8cc3-73d1e75c182e-kube-api-access-hm6n8\") on node \"crc\" DevicePath \"\""
Jan 30 12:12:31 crc kubenswrapper[4703]: I0130 12:12:31.045740 4703 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/96adb521-689d-4159-8cc3-73d1e75c182e-util\") on node \"crc\" DevicePath \"\""
Jan 30 12:12:31 crc kubenswrapper[4703]: I0130 12:12:31.586557 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs" event={"ID":"96adb521-689d-4159-8cc3-73d1e75c182e","Type":"ContainerDied","Data":"07aff12c55d9ad94692034a90dc6ca79f7ad3565c3d10fb53e9d625a1270a7de"}
Jan 30 12:12:31 crc kubenswrapper[4703]: I0130 12:12:31.586610 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="07aff12c55d9ad94692034a90dc6ca79f7ad3565c3d10fb53e9d625a1270a7de"
Jan 30 12:12:31 crc kubenswrapper[4703]: I0130 12:12:31.586696 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs"
Jan 30 12:12:36 crc kubenswrapper[4703]: I0130 12:12:36.290985 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-p4sfq"]
Jan 30 12:12:36 crc kubenswrapper[4703]: E0130 12:12:36.292523 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96adb521-689d-4159-8cc3-73d1e75c182e" containerName="extract"
Jan 30 12:12:36 crc kubenswrapper[4703]: I0130 12:12:36.292541 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="96adb521-689d-4159-8cc3-73d1e75c182e" containerName="extract"
Jan 30 12:12:36 crc kubenswrapper[4703]: E0130 12:12:36.292554 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96adb521-689d-4159-8cc3-73d1e75c182e" containerName="util"
Jan 30 12:12:36 crc kubenswrapper[4703]: I0130 12:12:36.292560 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="96adb521-689d-4159-8cc3-73d1e75c182e" containerName="util"
Jan 30 12:12:36 crc kubenswrapper[4703]: E0130 12:12:36.292567 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96adb521-689d-4159-8cc3-73d1e75c182e" containerName="pull"
Jan 30 12:12:36 crc kubenswrapper[4703]: I0130 12:12:36.292575 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="96adb521-689d-4159-8cc3-73d1e75c182e" containerName="pull"
Jan 30 12:12:36 crc kubenswrapper[4703]: I0130 12:12:36.292700 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="96adb521-689d-4159-8cc3-73d1e75c182e" containerName="extract"
Jan 30 12:12:36 crc kubenswrapper[4703]: I0130 12:12:36.293853 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-p4sfq"
Need to start a new one" pod="openshift-marketplace/community-operators-p4sfq" Jan 30 12:12:36 crc kubenswrapper[4703]: I0130 12:12:36.306418 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-p4sfq"] Jan 30 12:12:36 crc kubenswrapper[4703]: I0130 12:12:36.360751 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e8648d2-1388-4187-b138-19c88fae3b94-utilities\") pod \"community-operators-p4sfq\" (UID: \"8e8648d2-1388-4187-b138-19c88fae3b94\") " pod="openshift-marketplace/community-operators-p4sfq" Jan 30 12:12:36 crc kubenswrapper[4703]: I0130 12:12:36.360809 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e8648d2-1388-4187-b138-19c88fae3b94-catalog-content\") pod \"community-operators-p4sfq\" (UID: \"8e8648d2-1388-4187-b138-19c88fae3b94\") " pod="openshift-marketplace/community-operators-p4sfq" Jan 30 12:12:36 crc kubenswrapper[4703]: I0130 12:12:36.361029 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpm7h\" (UniqueName: \"kubernetes.io/projected/8e8648d2-1388-4187-b138-19c88fae3b94-kube-api-access-wpm7h\") pod \"community-operators-p4sfq\" (UID: \"8e8648d2-1388-4187-b138-19c88fae3b94\") " pod="openshift-marketplace/community-operators-p4sfq" Jan 30 12:12:36 crc kubenswrapper[4703]: I0130 12:12:36.462739 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e8648d2-1388-4187-b138-19c88fae3b94-utilities\") pod \"community-operators-p4sfq\" (UID: \"8e8648d2-1388-4187-b138-19c88fae3b94\") " pod="openshift-marketplace/community-operators-p4sfq" Jan 30 12:12:36 crc kubenswrapper[4703]: I0130 12:12:36.462806 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e8648d2-1388-4187-b138-19c88fae3b94-catalog-content\") pod \"community-operators-p4sfq\" (UID: \"8e8648d2-1388-4187-b138-19c88fae3b94\") " pod="openshift-marketplace/community-operators-p4sfq" Jan 30 12:12:36 crc kubenswrapper[4703]: I0130 12:12:36.462864 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpm7h\" (UniqueName: \"kubernetes.io/projected/8e8648d2-1388-4187-b138-19c88fae3b94-kube-api-access-wpm7h\") pod \"community-operators-p4sfq\" (UID: \"8e8648d2-1388-4187-b138-19c88fae3b94\") " pod="openshift-marketplace/community-operators-p4sfq" Jan 30 12:12:36 crc kubenswrapper[4703]: I0130 12:12:36.463465 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e8648d2-1388-4187-b138-19c88fae3b94-utilities\") pod \"community-operators-p4sfq\" (UID: \"8e8648d2-1388-4187-b138-19c88fae3b94\") " pod="openshift-marketplace/community-operators-p4sfq" Jan 30 12:12:36 crc kubenswrapper[4703]: I0130 12:12:36.463585 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e8648d2-1388-4187-b138-19c88fae3b94-catalog-content\") pod \"community-operators-p4sfq\" (UID: \"8e8648d2-1388-4187-b138-19c88fae3b94\") " pod="openshift-marketplace/community-operators-p4sfq" Jan 30 12:12:36 crc kubenswrapper[4703]: I0130 12:12:36.484039 4703 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-wpm7h\" (UniqueName: \"kubernetes.io/projected/8e8648d2-1388-4187-b138-19c88fae3b94-kube-api-access-wpm7h\") pod \"community-operators-p4sfq\" (UID: \"8e8648d2-1388-4187-b138-19c88fae3b94\") " pod="openshift-marketplace/community-operators-p4sfq" Jan 30 12:12:36 crc kubenswrapper[4703]: I0130 12:12:36.615938 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-p4sfq" Jan 30 12:12:36 crc kubenswrapper[4703]: I0130 12:12:36.967298 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-p4sfq"] Jan 30 12:12:37 crc kubenswrapper[4703]: I0130 12:12:37.630145 4703 generic.go:334] "Generic (PLEG): container finished" podID="8e8648d2-1388-4187-b138-19c88fae3b94" containerID="43bae8e9c761e5db86bae4cac21022cc25674389653949cf3b17882b425d77f4" exitCode=0 Jan 30 12:12:37 crc kubenswrapper[4703]: I0130 12:12:37.630252 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p4sfq" event={"ID":"8e8648d2-1388-4187-b138-19c88fae3b94","Type":"ContainerDied","Data":"43bae8e9c761e5db86bae4cac21022cc25674389653949cf3b17882b425d77f4"} Jan 30 12:12:37 crc kubenswrapper[4703]: I0130 12:12:37.630716 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p4sfq" event={"ID":"8e8648d2-1388-4187-b138-19c88fae3b94","Type":"ContainerStarted","Data":"43c84eead637544e352f7f17d8efcf2e8444972f8af9e4fd00fa4b0ccbd15ef0"} Jan 30 12:12:39 crc kubenswrapper[4703]: I0130 12:12:39.645930 4703 generic.go:334] "Generic (PLEG): container finished" podID="8e8648d2-1388-4187-b138-19c88fae3b94" containerID="703e5c28e4852a96ddc4d28006082a4b55ffd39297eed5677f841a4d9ce0eb6b" exitCode=0 Jan 30 12:12:39 crc kubenswrapper[4703]: I0130 12:12:39.646061 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p4sfq" event={"ID":"8e8648d2-1388-4187-b138-19c88fae3b94","Type":"ContainerDied","Data":"703e5c28e4852a96ddc4d28006082a4b55ffd39297eed5677f841a4d9ce0eb6b"} Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.008858 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9"] Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.009956 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.011903 4703 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.012196 4703 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-gkdhh" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.012457 4703 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.012970 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.017845 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.033667 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9"] Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.053509 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hptwp\" (UniqueName: \"kubernetes.io/projected/63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8-kube-api-access-hptwp\") pod \"metallb-operator-controller-manager-5585c54947-kz2s9\" (UID: \"63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8\") " pod="metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.053583 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8-apiservice-cert\") pod \"metallb-operator-controller-manager-5585c54947-kz2s9\" (UID: \"63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8\") " pod="metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.053624 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8-webhook-cert\") pod \"metallb-operator-controller-manager-5585c54947-kz2s9\" (UID: \"63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8\") " pod="metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.155552 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hptwp\" (UniqueName: \"kubernetes.io/projected/63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8-kube-api-access-hptwp\") pod \"metallb-operator-controller-manager-5585c54947-kz2s9\" (UID: \"63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8\") " pod="metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.155669 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8-apiservice-cert\") pod \"metallb-operator-controller-manager-5585c54947-kz2s9\" (UID: \"63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8\") " pod="metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.155753 
4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8-webhook-cert\") pod \"metallb-operator-controller-manager-5585c54947-kz2s9\" (UID: \"63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8\") " pod="metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.163941 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8-apiservice-cert\") pod \"metallb-operator-controller-manager-5585c54947-kz2s9\" (UID: \"63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8\") " pod="metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.170142 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8-webhook-cert\") pod \"metallb-operator-controller-manager-5585c54947-kz2s9\" (UID: \"63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8\") " pod="metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.176400 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hptwp\" (UniqueName: \"kubernetes.io/projected/63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8-kube-api-access-hptwp\") pod \"metallb-operator-controller-manager-5585c54947-kz2s9\" (UID: \"63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8\") " pod="metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.272146 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8"] Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.273229 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.280620 4703 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.280642 4703 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-w98hx" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.280939 4703 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.296106 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8"] Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.328439 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.358610 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9tqk\" (UniqueName: \"kubernetes.io/projected/c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715-kube-api-access-g9tqk\") pod \"metallb-operator-webhook-server-6dbbf674bb-smzg8\" (UID: \"c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715\") " pod="metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.358732 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715-apiservice-cert\") pod \"metallb-operator-webhook-server-6dbbf674bb-smzg8\" (UID: \"c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715\") " pod="metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.358763 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715-webhook-cert\") pod \"metallb-operator-webhook-server-6dbbf674bb-smzg8\" (UID: \"c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715\") " pod="metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.460786 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715-apiservice-cert\") pod \"metallb-operator-webhook-server-6dbbf674bb-smzg8\" (UID: \"c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715\") " pod="metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.460847 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715-webhook-cert\") pod \"metallb-operator-webhook-server-6dbbf674bb-smzg8\" (UID: \"c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715\") " pod="metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.460939 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9tqk\" (UniqueName: \"kubernetes.io/projected/c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715-kube-api-access-g9tqk\") pod \"metallb-operator-webhook-server-6dbbf674bb-smzg8\" (UID: \"c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715\") " pod="metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.470209 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715-apiservice-cert\") pod \"metallb-operator-webhook-server-6dbbf674bb-smzg8\" (UID: \"c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715\") " pod="metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.474970 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715-webhook-cert\") pod \"metallb-operator-webhook-server-6dbbf674bb-smzg8\" (UID: \"c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715\") " 
pod="metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.479893 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9tqk\" (UniqueName: \"kubernetes.io/projected/c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715-kube-api-access-g9tqk\") pod \"metallb-operator-webhook-server-6dbbf674bb-smzg8\" (UID: \"c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715\") " pod="metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.602484 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.668029 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p4sfq" event={"ID":"8e8648d2-1388-4187-b138-19c88fae3b94","Type":"ContainerStarted","Data":"a58309fc66bbe588daeb86e9bda62fdbfef34f66d272a82c9e339438ed476054"} Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.715427 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-p4sfq" podStartSLOduration=2.2722535329999998 podStartE2EDuration="4.71539656s" podCreationTimestamp="2026-01-30 12:12:36 +0000 UTC" firstStartedPulling="2026-01-30 12:12:37.632240306 +0000 UTC m=+993.410061960" lastFinishedPulling="2026-01-30 12:12:40.075383333 +0000 UTC m=+995.853204987" observedRunningTime="2026-01-30 12:12:40.711422132 +0000 UTC m=+996.489243786" watchObservedRunningTime="2026-01-30 12:12:40.71539656 +0000 UTC m=+996.493218214" Jan 30 12:12:40 crc kubenswrapper[4703]: I0130 12:12:40.769831 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9"] Jan 30 12:12:40 crc kubenswrapper[4703]: W0130 12:12:40.786315 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod63b2fd5c_5f47_4dcb_bad2_3f370b26a1e8.slice/crio-9331419bfb8936ea8437e15c219bb027cb3886f653fd32d34cd42e12572618ef WatchSource:0}: Error finding container 9331419bfb8936ea8437e15c219bb027cb3886f653fd32d34cd42e12572618ef: Status 404 returned error can't find the container with id 9331419bfb8936ea8437e15c219bb027cb3886f653fd32d34cd42e12572618ef Jan 30 12:12:41 crc kubenswrapper[4703]: I0130 12:12:41.047060 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8"] Jan 30 12:12:41 crc kubenswrapper[4703]: I0130 12:12:41.677933 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8" event={"ID":"c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715","Type":"ContainerStarted","Data":"965ea6e209a0b290e995e6a7fc421263be4a5867c857a80ed7b6ec46c420f772"} Jan 30 12:12:41 crc kubenswrapper[4703]: I0130 12:12:41.680690 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9" event={"ID":"63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8","Type":"ContainerStarted","Data":"9331419bfb8936ea8437e15c219bb027cb3886f653fd32d34cd42e12572618ef"} Jan 30 12:12:46 crc kubenswrapper[4703]: I0130 12:12:46.619321 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-p4sfq" Jan 30 12:12:46 crc kubenswrapper[4703]: I0130 12:12:46.621554 4703 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-p4sfq" Jan 30 12:12:46 crc kubenswrapper[4703]: I0130 12:12:46.686241 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-p4sfq" Jan 30 12:12:46 crc kubenswrapper[4703]: I0130 12:12:46.694312 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-l57ct"] Jan 30 12:12:46 crc kubenswrapper[4703]: I0130 12:12:46.695906 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l57ct" Jan 30 12:12:46 crc kubenswrapper[4703]: I0130 12:12:46.713862 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l57ct"] Jan 30 12:12:46 crc kubenswrapper[4703]: I0130 12:12:46.789290 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-p4sfq" Jan 30 12:12:46 crc kubenswrapper[4703]: I0130 12:12:46.829165 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/095140d6-e764-45a6-97c1-858e526b2dd8-utilities\") pod \"certified-operators-l57ct\" (UID: \"095140d6-e764-45a6-97c1-858e526b2dd8\") " pod="openshift-marketplace/certified-operators-l57ct" Jan 30 12:12:46 crc kubenswrapper[4703]: I0130 12:12:46.829232 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfw2f\" (UniqueName: \"kubernetes.io/projected/095140d6-e764-45a6-97c1-858e526b2dd8-kube-api-access-qfw2f\") pod \"certified-operators-l57ct\" (UID: \"095140d6-e764-45a6-97c1-858e526b2dd8\") " pod="openshift-marketplace/certified-operators-l57ct" Jan 30 12:12:46 crc kubenswrapper[4703]: I0130 12:12:46.829384 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/095140d6-e764-45a6-97c1-858e526b2dd8-catalog-content\") pod \"certified-operators-l57ct\" (UID: \"095140d6-e764-45a6-97c1-858e526b2dd8\") " pod="openshift-marketplace/certified-operators-l57ct" Jan 30 12:12:46 crc kubenswrapper[4703]: I0130 12:12:46.934236 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/095140d6-e764-45a6-97c1-858e526b2dd8-catalog-content\") pod \"certified-operators-l57ct\" (UID: \"095140d6-e764-45a6-97c1-858e526b2dd8\") " pod="openshift-marketplace/certified-operators-l57ct" Jan 30 12:12:46 crc kubenswrapper[4703]: I0130 12:12:46.934365 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/095140d6-e764-45a6-97c1-858e526b2dd8-utilities\") pod \"certified-operators-l57ct\" (UID: \"095140d6-e764-45a6-97c1-858e526b2dd8\") " pod="openshift-marketplace/certified-operators-l57ct" Jan 30 12:12:46 crc kubenswrapper[4703]: I0130 12:12:46.934391 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfw2f\" (UniqueName: \"kubernetes.io/projected/095140d6-e764-45a6-97c1-858e526b2dd8-kube-api-access-qfw2f\") pod \"certified-operators-l57ct\" (UID: \"095140d6-e764-45a6-97c1-858e526b2dd8\") " pod="openshift-marketplace/certified-operators-l57ct" Jan 30 12:12:46 crc kubenswrapper[4703]: I0130 12:12:46.934985 4703 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/095140d6-e764-45a6-97c1-858e526b2dd8-utilities\") pod \"certified-operators-l57ct\" (UID: \"095140d6-e764-45a6-97c1-858e526b2dd8\") " pod="openshift-marketplace/certified-operators-l57ct" Jan 30 12:12:46 crc kubenswrapper[4703]: I0130 12:12:46.935452 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/095140d6-e764-45a6-97c1-858e526b2dd8-catalog-content\") pod \"certified-operators-l57ct\" (UID: \"095140d6-e764-45a6-97c1-858e526b2dd8\") " pod="openshift-marketplace/certified-operators-l57ct" Jan 30 12:12:46 crc kubenswrapper[4703]: I0130 12:12:46.961330 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfw2f\" (UniqueName: \"kubernetes.io/projected/095140d6-e764-45a6-97c1-858e526b2dd8-kube-api-access-qfw2f\") pod \"certified-operators-l57ct\" (UID: \"095140d6-e764-45a6-97c1-858e526b2dd8\") " pod="openshift-marketplace/certified-operators-l57ct" Jan 30 12:12:47 crc kubenswrapper[4703]: I0130 12:12:47.018044 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l57ct" Jan 30 12:12:50 crc kubenswrapper[4703]: I0130 12:12:50.079482 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-p4sfq"] Jan 30 12:12:50 crc kubenswrapper[4703]: I0130 12:12:50.080299 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-p4sfq" podUID="8e8648d2-1388-4187-b138-19c88fae3b94" containerName="registry-server" containerID="cri-o://a58309fc66bbe588daeb86e9bda62fdbfef34f66d272a82c9e339438ed476054" gracePeriod=2 Jan 30 12:12:52 crc kubenswrapper[4703]: I0130 12:12:52.777329 4703 generic.go:334] "Generic (PLEG): container finished" podID="8e8648d2-1388-4187-b138-19c88fae3b94" containerID="a58309fc66bbe588daeb86e9bda62fdbfef34f66d272a82c9e339438ed476054" exitCode=0 Jan 30 12:12:52 crc kubenswrapper[4703]: I0130 12:12:52.777409 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p4sfq" event={"ID":"8e8648d2-1388-4187-b138-19c88fae3b94","Type":"ContainerDied","Data":"a58309fc66bbe588daeb86e9bda62fdbfef34f66d272a82c9e339438ed476054"} Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.015154 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-p4sfq" Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.108631 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e8648d2-1388-4187-b138-19c88fae3b94-utilities\") pod \"8e8648d2-1388-4187-b138-19c88fae3b94\" (UID: \"8e8648d2-1388-4187-b138-19c88fae3b94\") " Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.108820 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e8648d2-1388-4187-b138-19c88fae3b94-catalog-content\") pod \"8e8648d2-1388-4187-b138-19c88fae3b94\" (UID: \"8e8648d2-1388-4187-b138-19c88fae3b94\") " Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.108853 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wpm7h\" (UniqueName: \"kubernetes.io/projected/8e8648d2-1388-4187-b138-19c88fae3b94-kube-api-access-wpm7h\") pod \"8e8648d2-1388-4187-b138-19c88fae3b94\" (UID: \"8e8648d2-1388-4187-b138-19c88fae3b94\") " Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.109795 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e8648d2-1388-4187-b138-19c88fae3b94-utilities" (OuterVolumeSpecName: "utilities") pod "8e8648d2-1388-4187-b138-19c88fae3b94" (UID: "8e8648d2-1388-4187-b138-19c88fae3b94"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.126385 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l57ct"] Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.131845 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e8648d2-1388-4187-b138-19c88fae3b94-kube-api-access-wpm7h" (OuterVolumeSpecName: "kube-api-access-wpm7h") pod "8e8648d2-1388-4187-b138-19c88fae3b94" (UID: "8e8648d2-1388-4187-b138-19c88fae3b94"). InnerVolumeSpecName "kube-api-access-wpm7h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:12:54 crc kubenswrapper[4703]: W0130 12:12:54.139769 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod095140d6_e764_45a6_97c1_858e526b2dd8.slice/crio-0bbacbe29527314efb19e720ed8a15e96dc9664db3f601f582318afd792a49da WatchSource:0}: Error finding container 0bbacbe29527314efb19e720ed8a15e96dc9664db3f601f582318afd792a49da: Status 404 returned error can't find the container with id 0bbacbe29527314efb19e720ed8a15e96dc9664db3f601f582318afd792a49da Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.182648 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e8648d2-1388-4187-b138-19c88fae3b94-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8e8648d2-1388-4187-b138-19c88fae3b94" (UID: "8e8648d2-1388-4187-b138-19c88fae3b94"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.210595 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8e8648d2-1388-4187-b138-19c88fae3b94-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.210630 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8e8648d2-1388-4187-b138-19c88fae3b94-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.210645 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wpm7h\" (UniqueName: \"kubernetes.io/projected/8e8648d2-1388-4187-b138-19c88fae3b94-kube-api-access-wpm7h\") on node \"crc\" DevicePath \"\"" Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.817513 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9" event={"ID":"63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8","Type":"ContainerStarted","Data":"5b2a3ea1f5748262bbcfd31bbb05c6213ea4ae611c16eb231bddf2cfda18e847"} Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.817685 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9" Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.821153 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-p4sfq" event={"ID":"8e8648d2-1388-4187-b138-19c88fae3b94","Type":"ContainerDied","Data":"43c84eead637544e352f7f17d8efcf2e8444972f8af9e4fd00fa4b0ccbd15ef0"} Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.821232 4703 scope.go:117] "RemoveContainer" containerID="a58309fc66bbe588daeb86e9bda62fdbfef34f66d272a82c9e339438ed476054" Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.821371 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-p4sfq" Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.832033 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8" event={"ID":"c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715","Type":"ContainerStarted","Data":"a7f62f571bb33d33a5abfb2839fbffc733c7e4e825a5b88bfc1744add3dde959"} Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.832252 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8" Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.838319 4703 generic.go:334] "Generic (PLEG): container finished" podID="095140d6-e764-45a6-97c1-858e526b2dd8" containerID="4b8ef8b169ad077dcadfb391115c282cb986aa80989c77af6bc7e00d3ea125fc" exitCode=0 Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.838366 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l57ct" event={"ID":"095140d6-e764-45a6-97c1-858e526b2dd8","Type":"ContainerDied","Data":"4b8ef8b169ad077dcadfb391115c282cb986aa80989c77af6bc7e00d3ea125fc"} Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.838404 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l57ct" event={"ID":"095140d6-e764-45a6-97c1-858e526b2dd8","Type":"ContainerStarted","Data":"0bbacbe29527314efb19e720ed8a15e96dc9664db3f601f582318afd792a49da"} Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.860330 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9" podStartSLOduration=2.944507561 podStartE2EDuration="15.860302582s" podCreationTimestamp="2026-01-30 12:12:39 +0000 UTC" firstStartedPulling="2026-01-30 12:12:40.790550025 +0000 UTC m=+996.568371679" lastFinishedPulling="2026-01-30 12:12:53.706345046 +0000 UTC m=+1009.484166700" observedRunningTime="2026-01-30 12:12:54.858874684 +0000 UTC m=+1010.636696348" watchObservedRunningTime="2026-01-30 12:12:54.860302582 +0000 UTC m=+1010.638124226" Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.865647 4703 scope.go:117] "RemoveContainer" containerID="703e5c28e4852a96ddc4d28006082a4b55ffd39297eed5677f841a4d9ce0eb6b" Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.889765 4703 scope.go:117] "RemoveContainer" containerID="43bae8e9c761e5db86bae4cac21022cc25674389653949cf3b17882b425d77f4" Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.898985 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8" podStartSLOduration=2.280888296 podStartE2EDuration="14.898955504s" podCreationTimestamp="2026-01-30 12:12:40 +0000 UTC" firstStartedPulling="2026-01-30 12:12:41.058370932 +0000 UTC m=+996.836192586" lastFinishedPulling="2026-01-30 12:12:53.67643814 +0000 UTC m=+1009.454259794" observedRunningTime="2026-01-30 12:12:54.894006341 +0000 UTC m=+1010.671828025" watchObservedRunningTime="2026-01-30 12:12:54.898955504 +0000 UTC m=+1010.676777168" Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.938584 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-p4sfq"] Jan 30 12:12:54 crc kubenswrapper[4703]: I0130 12:12:54.944279 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-p4sfq"] Jan 30 12:12:55 crc 
Jan 30 12:12:56 crc kubenswrapper[4703]: I0130 12:12:56.940598 4703 generic.go:334] "Generic (PLEG): container finished" podID="095140d6-e764-45a6-97c1-858e526b2dd8" containerID="6443abadd81a2762bbfc59d2dac00d9e2953ae9a87e8a67e0adb466110d6fb19" exitCode=0
Jan 30 12:12:56 crc kubenswrapper[4703]: I0130 12:12:56.940783 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l57ct" event={"ID":"095140d6-e764-45a6-97c1-858e526b2dd8","Type":"ContainerDied","Data":"6443abadd81a2762bbfc59d2dac00d9e2953ae9a87e8a67e0adb466110d6fb19"}
Jan 30 12:12:57 crc kubenswrapper[4703]: I0130 12:12:57.951492 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l57ct" event={"ID":"095140d6-e764-45a6-97c1-858e526b2dd8","Type":"ContainerStarted","Data":"fb6a523e55d75d596d3116bb7ff6991e2527cc5e949b480189efbabb776ec66b"}
Jan 30 12:12:57 crc kubenswrapper[4703]: I0130 12:12:57.978700 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-l57ct" podStartSLOduration=9.443422306 podStartE2EDuration="11.978673936s" podCreationTimestamp="2026-01-30 12:12:46 +0000 UTC" firstStartedPulling="2026-01-30 12:12:54.840304584 +0000 UTC m=+1010.618126238" lastFinishedPulling="2026-01-30 12:12:57.375556214 +0000 UTC m=+1013.153377868" observedRunningTime="2026-01-30 12:12:57.976717893 +0000 UTC m=+1013.754539557" watchObservedRunningTime="2026-01-30 12:12:57.978673936 +0000 UTC m=+1013.756495590"
Jan 30 12:13:03 crc kubenswrapper[4703]: I0130 12:13:03.913161 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-s9vng"]
Jan 30 12:13:03 crc kubenswrapper[4703]: E0130 12:13:03.914162 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e8648d2-1388-4187-b138-19c88fae3b94" containerName="registry-server"
Jan 30 12:13:03 crc kubenswrapper[4703]: I0130 12:13:03.914185 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e8648d2-1388-4187-b138-19c88fae3b94" containerName="registry-server"
Jan 30 12:13:03 crc kubenswrapper[4703]: E0130 12:13:03.914206 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e8648d2-1388-4187-b138-19c88fae3b94" containerName="extract-content"
Jan 30 12:13:03 crc kubenswrapper[4703]: I0130 12:13:03.914215 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e8648d2-1388-4187-b138-19c88fae3b94" containerName="extract-content"
Jan 30 12:13:03 crc kubenswrapper[4703]: E0130 12:13:03.914227 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e8648d2-1388-4187-b138-19c88fae3b94" containerName="extract-utilities"
Jan 30 12:13:03 crc kubenswrapper[4703]: I0130 12:13:03.914238 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e8648d2-1388-4187-b138-19c88fae3b94" containerName="extract-utilities"
Jan 30 12:13:03 crc kubenswrapper[4703]: I0130 12:13:03.914414 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e8648d2-1388-4187-b138-19c88fae3b94" containerName="registry-server"
Jan 30 12:13:03 crc kubenswrapper[4703]: I0130 12:13:03.915754 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s9vng"
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s9vng" Jan 30 12:13:03 crc kubenswrapper[4703]: I0130 12:13:03.933279 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s9vng"] Jan 30 12:13:04 crc kubenswrapper[4703]: I0130 12:13:04.055490 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4f8970c-5b9f-47ca-bb90-14e126a43592-utilities\") pod \"redhat-marketplace-s9vng\" (UID: \"f4f8970c-5b9f-47ca-bb90-14e126a43592\") " pod="openshift-marketplace/redhat-marketplace-s9vng" Jan 30 12:13:04 crc kubenswrapper[4703]: I0130 12:13:04.055675 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4f8970c-5b9f-47ca-bb90-14e126a43592-catalog-content\") pod \"redhat-marketplace-s9vng\" (UID: \"f4f8970c-5b9f-47ca-bb90-14e126a43592\") " pod="openshift-marketplace/redhat-marketplace-s9vng" Jan 30 12:13:04 crc kubenswrapper[4703]: I0130 12:13:04.055739 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8c52w\" (UniqueName: \"kubernetes.io/projected/f4f8970c-5b9f-47ca-bb90-14e126a43592-kube-api-access-8c52w\") pod \"redhat-marketplace-s9vng\" (UID: \"f4f8970c-5b9f-47ca-bb90-14e126a43592\") " pod="openshift-marketplace/redhat-marketplace-s9vng" Jan 30 12:13:04 crc kubenswrapper[4703]: I0130 12:13:04.156812 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4f8970c-5b9f-47ca-bb90-14e126a43592-catalog-content\") pod \"redhat-marketplace-s9vng\" (UID: \"f4f8970c-5b9f-47ca-bb90-14e126a43592\") " pod="openshift-marketplace/redhat-marketplace-s9vng" Jan 30 12:13:04 crc kubenswrapper[4703]: I0130 12:13:04.156885 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8c52w\" (UniqueName: \"kubernetes.io/projected/f4f8970c-5b9f-47ca-bb90-14e126a43592-kube-api-access-8c52w\") pod \"redhat-marketplace-s9vng\" (UID: \"f4f8970c-5b9f-47ca-bb90-14e126a43592\") " pod="openshift-marketplace/redhat-marketplace-s9vng" Jan 30 12:13:04 crc kubenswrapper[4703]: I0130 12:13:04.156935 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4f8970c-5b9f-47ca-bb90-14e126a43592-utilities\") pod \"redhat-marketplace-s9vng\" (UID: \"f4f8970c-5b9f-47ca-bb90-14e126a43592\") " pod="openshift-marketplace/redhat-marketplace-s9vng" Jan 30 12:13:04 crc kubenswrapper[4703]: I0130 12:13:04.157576 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4f8970c-5b9f-47ca-bb90-14e126a43592-utilities\") pod \"redhat-marketplace-s9vng\" (UID: \"f4f8970c-5b9f-47ca-bb90-14e126a43592\") " pod="openshift-marketplace/redhat-marketplace-s9vng" Jan 30 12:13:04 crc kubenswrapper[4703]: I0130 12:13:04.157743 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4f8970c-5b9f-47ca-bb90-14e126a43592-catalog-content\") pod \"redhat-marketplace-s9vng\" (UID: \"f4f8970c-5b9f-47ca-bb90-14e126a43592\") " pod="openshift-marketplace/redhat-marketplace-s9vng" Jan 30 12:13:04 crc kubenswrapper[4703]: I0130 12:13:04.183310 4703 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-8c52w\" (UniqueName: \"kubernetes.io/projected/f4f8970c-5b9f-47ca-bb90-14e126a43592-kube-api-access-8c52w\") pod \"redhat-marketplace-s9vng\" (UID: \"f4f8970c-5b9f-47ca-bb90-14e126a43592\") " pod="openshift-marketplace/redhat-marketplace-s9vng" Jan 30 12:13:04 crc kubenswrapper[4703]: I0130 12:13:04.243203 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s9vng" Jan 30 12:13:04 crc kubenswrapper[4703]: I0130 12:13:04.739930 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s9vng"] Jan 30 12:13:05 crc kubenswrapper[4703]: I0130 12:13:05.007572 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s9vng" event={"ID":"f4f8970c-5b9f-47ca-bb90-14e126a43592","Type":"ContainerStarted","Data":"0c7f8e7240be5cfd83f4743792428152b18d5129ac46bde258ade0b24603b3b6"} Jan 30 12:13:05 crc kubenswrapper[4703]: I0130 12:13:05.008041 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s9vng" event={"ID":"f4f8970c-5b9f-47ca-bb90-14e126a43592","Type":"ContainerStarted","Data":"8c2da06e42800105ee9c0d1518d0d7a4b78de88d8968286df9b8748c4a7b1e56"} Jan 30 12:13:06 crc kubenswrapper[4703]: I0130 12:13:06.015610 4703 generic.go:334] "Generic (PLEG): container finished" podID="f4f8970c-5b9f-47ca-bb90-14e126a43592" containerID="0c7f8e7240be5cfd83f4743792428152b18d5129ac46bde258ade0b24603b3b6" exitCode=0 Jan 30 12:13:06 crc kubenswrapper[4703]: I0130 12:13:06.016293 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s9vng" event={"ID":"f4f8970c-5b9f-47ca-bb90-14e126a43592","Type":"ContainerDied","Data":"0c7f8e7240be5cfd83f4743792428152b18d5129ac46bde258ade0b24603b3b6"} Jan 30 12:13:07 crc kubenswrapper[4703]: I0130 12:13:07.018515 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-l57ct" Jan 30 12:13:07 crc kubenswrapper[4703]: I0130 12:13:07.019179 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-l57ct" Jan 30 12:13:07 crc kubenswrapper[4703]: I0130 12:13:07.026148 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s9vng" event={"ID":"f4f8970c-5b9f-47ca-bb90-14e126a43592","Type":"ContainerStarted","Data":"3eb188115b134f02d3dbb02dbd45f9c49446054c91c01e7797e57c892c423a22"} Jan 30 12:13:07 crc kubenswrapper[4703]: I0130 12:13:07.068711 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-l57ct" Jan 30 12:13:08 crc kubenswrapper[4703]: I0130 12:13:08.035991 4703 generic.go:334] "Generic (PLEG): container finished" podID="f4f8970c-5b9f-47ca-bb90-14e126a43592" containerID="3eb188115b134f02d3dbb02dbd45f9c49446054c91c01e7797e57c892c423a22" exitCode=0 Jan 30 12:13:08 crc kubenswrapper[4703]: I0130 12:13:08.039353 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s9vng" event={"ID":"f4f8970c-5b9f-47ca-bb90-14e126a43592","Type":"ContainerDied","Data":"3eb188115b134f02d3dbb02dbd45f9c49446054c91c01e7797e57c892c423a22"} Jan 30 12:13:08 crc kubenswrapper[4703]: I0130 12:13:08.099313 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-l57ct" Jan 30 12:13:09 crc 
Jan 30 12:13:09 crc kubenswrapper[4703]: I0130 12:13:09.074471 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-s9vng" podStartSLOduration=3.532346078 podStartE2EDuration="6.074440921s" podCreationTimestamp="2026-01-30 12:13:03 +0000 UTC" firstStartedPulling="2026-01-30 12:13:06.0199231 +0000 UTC m=+1021.797744754" lastFinishedPulling="2026-01-30 12:13:08.562017933 +0000 UTC m=+1024.339839597" observedRunningTime="2026-01-30 12:13:09.070633769 +0000 UTC m=+1024.848455433" watchObservedRunningTime="2026-01-30 12:13:09.074440921 +0000 UTC m=+1024.852262585"
Jan 30 12:13:10 crc kubenswrapper[4703]: I0130 12:13:10.607766 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-6dbbf674bb-smzg8"
Jan 30 12:13:10 crc kubenswrapper[4703]: I0130 12:13:10.879387 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l57ct"]
Jan 30 12:13:10 crc kubenswrapper[4703]: I0130 12:13:10.879763 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-l57ct" podUID="095140d6-e764-45a6-97c1-858e526b2dd8" containerName="registry-server" containerID="cri-o://fb6a523e55d75d596d3116bb7ff6991e2527cc5e949b480189efbabb776ec66b" gracePeriod=2
Jan 30 12:13:11 crc kubenswrapper[4703]: I0130 12:13:11.070075 4703 generic.go:334] "Generic (PLEG): container finished" podID="095140d6-e764-45a6-97c1-858e526b2dd8" containerID="fb6a523e55d75d596d3116bb7ff6991e2527cc5e949b480189efbabb776ec66b" exitCode=0
Jan 30 12:13:11 crc kubenswrapper[4703]: I0130 12:13:11.070222 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l57ct" event={"ID":"095140d6-e764-45a6-97c1-858e526b2dd8","Type":"ContainerDied","Data":"fb6a523e55d75d596d3116bb7ff6991e2527cc5e949b480189efbabb776ec66b"}
Jan 30 12:13:11 crc kubenswrapper[4703]: I0130 12:13:11.363956 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l57ct"
Need to start a new one" pod="openshift-marketplace/certified-operators-l57ct" Jan 30 12:13:11 crc kubenswrapper[4703]: I0130 12:13:11.479257 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/095140d6-e764-45a6-97c1-858e526b2dd8-catalog-content\") pod \"095140d6-e764-45a6-97c1-858e526b2dd8\" (UID: \"095140d6-e764-45a6-97c1-858e526b2dd8\") " Jan 30 12:13:11 crc kubenswrapper[4703]: I0130 12:13:11.479315 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qfw2f\" (UniqueName: \"kubernetes.io/projected/095140d6-e764-45a6-97c1-858e526b2dd8-kube-api-access-qfw2f\") pod \"095140d6-e764-45a6-97c1-858e526b2dd8\" (UID: \"095140d6-e764-45a6-97c1-858e526b2dd8\") " Jan 30 12:13:11 crc kubenswrapper[4703]: I0130 12:13:11.479341 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/095140d6-e764-45a6-97c1-858e526b2dd8-utilities\") pod \"095140d6-e764-45a6-97c1-858e526b2dd8\" (UID: \"095140d6-e764-45a6-97c1-858e526b2dd8\") " Jan 30 12:13:11 crc kubenswrapper[4703]: I0130 12:13:11.480678 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/095140d6-e764-45a6-97c1-858e526b2dd8-utilities" (OuterVolumeSpecName: "utilities") pod "095140d6-e764-45a6-97c1-858e526b2dd8" (UID: "095140d6-e764-45a6-97c1-858e526b2dd8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:13:11 crc kubenswrapper[4703]: I0130 12:13:11.495616 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/095140d6-e764-45a6-97c1-858e526b2dd8-kube-api-access-qfw2f" (OuterVolumeSpecName: "kube-api-access-qfw2f") pod "095140d6-e764-45a6-97c1-858e526b2dd8" (UID: "095140d6-e764-45a6-97c1-858e526b2dd8"). InnerVolumeSpecName "kube-api-access-qfw2f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:13:11 crc kubenswrapper[4703]: I0130 12:13:11.525971 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/095140d6-e764-45a6-97c1-858e526b2dd8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "095140d6-e764-45a6-97c1-858e526b2dd8" (UID: "095140d6-e764-45a6-97c1-858e526b2dd8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:13:11 crc kubenswrapper[4703]: I0130 12:13:11.581444 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/095140d6-e764-45a6-97c1-858e526b2dd8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:13:11 crc kubenswrapper[4703]: I0130 12:13:11.581541 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qfw2f\" (UniqueName: \"kubernetes.io/projected/095140d6-e764-45a6-97c1-858e526b2dd8-kube-api-access-qfw2f\") on node \"crc\" DevicePath \"\"" Jan 30 12:13:11 crc kubenswrapper[4703]: I0130 12:13:11.581560 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/095140d6-e764-45a6-97c1-858e526b2dd8-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:13:12 crc kubenswrapper[4703]: I0130 12:13:12.080705 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l57ct" event={"ID":"095140d6-e764-45a6-97c1-858e526b2dd8","Type":"ContainerDied","Data":"0bbacbe29527314efb19e720ed8a15e96dc9664db3f601f582318afd792a49da"} Jan 30 12:13:12 crc kubenswrapper[4703]: I0130 12:13:12.080781 4703 scope.go:117] "RemoveContainer" containerID="fb6a523e55d75d596d3116bb7ff6991e2527cc5e949b480189efbabb776ec66b" Jan 30 12:13:12 crc kubenswrapper[4703]: I0130 12:13:12.080805 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l57ct" Jan 30 12:13:12 crc kubenswrapper[4703]: I0130 12:13:12.101553 4703 scope.go:117] "RemoveContainer" containerID="6443abadd81a2762bbfc59d2dac00d9e2953ae9a87e8a67e0adb466110d6fb19" Jan 30 12:13:12 crc kubenswrapper[4703]: I0130 12:13:12.120498 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l57ct"] Jan 30 12:13:12 crc kubenswrapper[4703]: I0130 12:13:12.122444 4703 scope.go:117] "RemoveContainer" containerID="4b8ef8b169ad077dcadfb391115c282cb986aa80989c77af6bc7e00d3ea125fc" Jan 30 12:13:12 crc kubenswrapper[4703]: I0130 12:13:12.125797 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-l57ct"] Jan 30 12:13:12 crc kubenswrapper[4703]: I0130 12:13:12.823712 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:13:12 crc kubenswrapper[4703]: I0130 12:13:12.824363 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:13:13 crc kubenswrapper[4703]: I0130 12:13:13.109216 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="095140d6-e764-45a6-97c1-858e526b2dd8" path="/var/lib/kubelet/pods/095140d6-e764-45a6-97c1-858e526b2dd8/volumes" Jan 30 12:13:14 crc kubenswrapper[4703]: I0130 12:13:14.244496 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-s9vng" Jan 30 12:13:14 crc kubenswrapper[4703]: I0130 12:13:14.244579 4703 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-s9vng" Jan 30 12:13:14 crc kubenswrapper[4703]: I0130 12:13:14.292284 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-s9vng" Jan 30 12:13:15 crc kubenswrapper[4703]: I0130 12:13:15.166341 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-s9vng" Jan 30 12:13:16 crc kubenswrapper[4703]: I0130 12:13:16.284874 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s9vng"] Jan 30 12:13:17 crc kubenswrapper[4703]: I0130 12:13:17.125058 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-s9vng" podUID="f4f8970c-5b9f-47ca-bb90-14e126a43592" containerName="registry-server" containerID="cri-o://483e8595aa0fc5a9e67e7c4cfdf724d0d449db4d2e7bac97ea2a908c5d04e9ef" gracePeriod=2 Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.035714 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s9vng" Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.108895 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8c52w\" (UniqueName: \"kubernetes.io/projected/f4f8970c-5b9f-47ca-bb90-14e126a43592-kube-api-access-8c52w\") pod \"f4f8970c-5b9f-47ca-bb90-14e126a43592\" (UID: \"f4f8970c-5b9f-47ca-bb90-14e126a43592\") " Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.109003 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4f8970c-5b9f-47ca-bb90-14e126a43592-utilities\") pod \"f4f8970c-5b9f-47ca-bb90-14e126a43592\" (UID: \"f4f8970c-5b9f-47ca-bb90-14e126a43592\") " Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.109938 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4f8970c-5b9f-47ca-bb90-14e126a43592-utilities" (OuterVolumeSpecName: "utilities") pod "f4f8970c-5b9f-47ca-bb90-14e126a43592" (UID: "f4f8970c-5b9f-47ca-bb90-14e126a43592"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.118488 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4f8970c-5b9f-47ca-bb90-14e126a43592-kube-api-access-8c52w" (OuterVolumeSpecName: "kube-api-access-8c52w") pod "f4f8970c-5b9f-47ca-bb90-14e126a43592" (UID: "f4f8970c-5b9f-47ca-bb90-14e126a43592"). InnerVolumeSpecName "kube-api-access-8c52w". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.188473 4703 generic.go:334] "Generic (PLEG): container finished" podID="f4f8970c-5b9f-47ca-bb90-14e126a43592" containerID="483e8595aa0fc5a9e67e7c4cfdf724d0d449db4d2e7bac97ea2a908c5d04e9ef" exitCode=0 Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.188540 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s9vng" event={"ID":"f4f8970c-5b9f-47ca-bb90-14e126a43592","Type":"ContainerDied","Data":"483e8595aa0fc5a9e67e7c4cfdf724d0d449db4d2e7bac97ea2a908c5d04e9ef"} Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.188589 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s9vng" event={"ID":"f4f8970c-5b9f-47ca-bb90-14e126a43592","Type":"ContainerDied","Data":"8c2da06e42800105ee9c0d1518d0d7a4b78de88d8968286df9b8748c4a7b1e56"} Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.188621 4703 scope.go:117] "RemoveContainer" containerID="483e8595aa0fc5a9e67e7c4cfdf724d0d449db4d2e7bac97ea2a908c5d04e9ef" Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.188838 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s9vng" Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.210194 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4f8970c-5b9f-47ca-bb90-14e126a43592-catalog-content\") pod \"f4f8970c-5b9f-47ca-bb90-14e126a43592\" (UID: \"f4f8970c-5b9f-47ca-bb90-14e126a43592\") " Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.210510 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8c52w\" (UniqueName: \"kubernetes.io/projected/f4f8970c-5b9f-47ca-bb90-14e126a43592-kube-api-access-8c52w\") on node \"crc\" DevicePath \"\"" Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.210526 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f4f8970c-5b9f-47ca-bb90-14e126a43592-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.256800 4703 scope.go:117] "RemoveContainer" containerID="3eb188115b134f02d3dbb02dbd45f9c49446054c91c01e7797e57c892c423a22" Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.258244 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4f8970c-5b9f-47ca-bb90-14e126a43592-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f4f8970c-5b9f-47ca-bb90-14e126a43592" (UID: "f4f8970c-5b9f-47ca-bb90-14e126a43592"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.276842 4703 scope.go:117] "RemoveContainer" containerID="0c7f8e7240be5cfd83f4743792428152b18d5129ac46bde258ade0b24603b3b6" Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.295820 4703 scope.go:117] "RemoveContainer" containerID="483e8595aa0fc5a9e67e7c4cfdf724d0d449db4d2e7bac97ea2a908c5d04e9ef" Jan 30 12:13:18 crc kubenswrapper[4703]: E0130 12:13:18.296646 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"483e8595aa0fc5a9e67e7c4cfdf724d0d449db4d2e7bac97ea2a908c5d04e9ef\": container with ID starting with 483e8595aa0fc5a9e67e7c4cfdf724d0d449db4d2e7bac97ea2a908c5d04e9ef not found: ID does not exist" containerID="483e8595aa0fc5a9e67e7c4cfdf724d0d449db4d2e7bac97ea2a908c5d04e9ef" Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.296706 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"483e8595aa0fc5a9e67e7c4cfdf724d0d449db4d2e7bac97ea2a908c5d04e9ef"} err="failed to get container status \"483e8595aa0fc5a9e67e7c4cfdf724d0d449db4d2e7bac97ea2a908c5d04e9ef\": rpc error: code = NotFound desc = could not find container \"483e8595aa0fc5a9e67e7c4cfdf724d0d449db4d2e7bac97ea2a908c5d04e9ef\": container with ID starting with 483e8595aa0fc5a9e67e7c4cfdf724d0d449db4d2e7bac97ea2a908c5d04e9ef not found: ID does not exist" Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.296745 4703 scope.go:117] "RemoveContainer" containerID="3eb188115b134f02d3dbb02dbd45f9c49446054c91c01e7797e57c892c423a22" Jan 30 12:13:18 crc kubenswrapper[4703]: E0130 12:13:18.297384 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3eb188115b134f02d3dbb02dbd45f9c49446054c91c01e7797e57c892c423a22\": container with ID starting with 3eb188115b134f02d3dbb02dbd45f9c49446054c91c01e7797e57c892c423a22 not found: ID does not exist" containerID="3eb188115b134f02d3dbb02dbd45f9c49446054c91c01e7797e57c892c423a22" Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.297420 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3eb188115b134f02d3dbb02dbd45f9c49446054c91c01e7797e57c892c423a22"} err="failed to get container status \"3eb188115b134f02d3dbb02dbd45f9c49446054c91c01e7797e57c892c423a22\": rpc error: code = NotFound desc = could not find container \"3eb188115b134f02d3dbb02dbd45f9c49446054c91c01e7797e57c892c423a22\": container with ID starting with 3eb188115b134f02d3dbb02dbd45f9c49446054c91c01e7797e57c892c423a22 not found: ID does not exist" Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.297437 4703 scope.go:117] "RemoveContainer" containerID="0c7f8e7240be5cfd83f4743792428152b18d5129ac46bde258ade0b24603b3b6" Jan 30 12:13:18 crc kubenswrapper[4703]: E0130 12:13:18.297727 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c7f8e7240be5cfd83f4743792428152b18d5129ac46bde258ade0b24603b3b6\": container with ID starting with 0c7f8e7240be5cfd83f4743792428152b18d5129ac46bde258ade0b24603b3b6 not found: ID does not exist" containerID="0c7f8e7240be5cfd83f4743792428152b18d5129ac46bde258ade0b24603b3b6" Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.297764 4703 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"0c7f8e7240be5cfd83f4743792428152b18d5129ac46bde258ade0b24603b3b6"} err="failed to get container status \"0c7f8e7240be5cfd83f4743792428152b18d5129ac46bde258ade0b24603b3b6\": rpc error: code = NotFound desc = could not find container \"0c7f8e7240be5cfd83f4743792428152b18d5129ac46bde258ade0b24603b3b6\": container with ID starting with 0c7f8e7240be5cfd83f4743792428152b18d5129ac46bde258ade0b24603b3b6 not found: ID does not exist" Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.311474 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f4f8970c-5b9f-47ca-bb90-14e126a43592-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.523313 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s9vng"] Jan 30 12:13:18 crc kubenswrapper[4703]: I0130 12:13:18.529681 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-s9vng"] Jan 30 12:13:19 crc kubenswrapper[4703]: I0130 12:13:19.097765 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4f8970c-5b9f-47ca-bb90-14e126a43592" path="/var/lib/kubelet/pods/f4f8970c-5b9f-47ca-bb90-14e126a43592/volumes" Jan 30 12:13:30 crc kubenswrapper[4703]: I0130 12:13:30.332190 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-5585c54947-kz2s9" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.097328 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-krzld"] Jan 30 12:13:31 crc kubenswrapper[4703]: E0130 12:13:31.098420 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4f8970c-5b9f-47ca-bb90-14e126a43592" containerName="extract-content" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.098453 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4f8970c-5b9f-47ca-bb90-14e126a43592" containerName="extract-content" Jan 30 12:13:31 crc kubenswrapper[4703]: E0130 12:13:31.098464 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="095140d6-e764-45a6-97c1-858e526b2dd8" containerName="registry-server" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.098472 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="095140d6-e764-45a6-97c1-858e526b2dd8" containerName="registry-server" Jan 30 12:13:31 crc kubenswrapper[4703]: E0130 12:13:31.098479 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4f8970c-5b9f-47ca-bb90-14e126a43592" containerName="registry-server" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.098486 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4f8970c-5b9f-47ca-bb90-14e126a43592" containerName="registry-server" Jan 30 12:13:31 crc kubenswrapper[4703]: E0130 12:13:31.098510 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="095140d6-e764-45a6-97c1-858e526b2dd8" containerName="extract-utilities" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.098517 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="095140d6-e764-45a6-97c1-858e526b2dd8" containerName="extract-utilities" Jan 30 12:13:31 crc kubenswrapper[4703]: E0130 12:13:31.098531 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4f8970c-5b9f-47ca-bb90-14e126a43592" containerName="extract-utilities" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.098538 
Jan 30 12:13:31 crc kubenswrapper[4703]: E0130 12:13:31.098554 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="095140d6-e764-45a6-97c1-858e526b2dd8" containerName="extract-content"
Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.098562 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="095140d6-e764-45a6-97c1-858e526b2dd8" containerName="extract-content"
Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.098749 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="095140d6-e764-45a6-97c1-858e526b2dd8" containerName="registry-server"
Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.098769 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4f8970c-5b9f-47ca-bb90-14e126a43592" containerName="registry-server"
Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.102075 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-krzld"
Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.104065 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.104372 4703 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-phl9g"
Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.104540 4703 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.108202 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-bhs52"]
Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.109482 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-bhs52"
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-bhs52" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.111973 4703 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.134513 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-bhs52"] Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.167627 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6jqf\" (UniqueName: \"kubernetes.io/projected/fe952b2e-c0d5-4420-a5ba-7128c73c6b3e-kube-api-access-h6jqf\") pod \"frr-k8s-webhook-server-7df86c4f6c-bhs52\" (UID: \"fe952b2e-c0d5-4420-a5ba-7128c73c6b3e\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-bhs52" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.167691 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8h5vp\" (UniqueName: \"kubernetes.io/projected/2bb59434-7622-4640-bb0c-28839fa5405c-kube-api-access-8h5vp\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.167754 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/2bb59434-7622-4640-bb0c-28839fa5405c-reloader\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.167792 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fe952b2e-c0d5-4420-a5ba-7128c73c6b3e-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-bhs52\" (UID: \"fe952b2e-c0d5-4420-a5ba-7128c73c6b3e\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-bhs52" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.167814 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/2bb59434-7622-4640-bb0c-28839fa5405c-frr-startup\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.167868 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/2bb59434-7622-4640-bb0c-28839fa5405c-frr-sockets\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.167899 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/2bb59434-7622-4640-bb0c-28839fa5405c-metrics\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.167938 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2bb59434-7622-4640-bb0c-28839fa5405c-metrics-certs\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " 
pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.167960 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/2bb59434-7622-4640-bb0c-28839fa5405c-frr-conf\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.199665 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-c59pj"] Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.201300 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-c59pj" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.204177 4703 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.204620 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.204808 4703 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.204981 4703 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-nwzz9" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.239279 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-mqw8c"] Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.243599 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6968d8fdc4-mqw8c" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.249722 4703 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.269874 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8mkc\" (UniqueName: \"kubernetes.io/projected/e0696463-355f-411b-89b5-9a0bd3211101-kube-api-access-d8mkc\") pod \"speaker-c59pj\" (UID: \"e0696463-355f-411b-89b5-9a0bd3211101\") " pod="metallb-system/speaker-c59pj" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.270006 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6jqf\" (UniqueName: \"kubernetes.io/projected/fe952b2e-c0d5-4420-a5ba-7128c73c6b3e-kube-api-access-h6jqf\") pod \"frr-k8s-webhook-server-7df86c4f6c-bhs52\" (UID: \"fe952b2e-c0d5-4420-a5ba-7128c73c6b3e\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-bhs52" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.270041 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8h5vp\" (UniqueName: \"kubernetes.io/projected/2bb59434-7622-4640-bb0c-28839fa5405c-kube-api-access-8h5vp\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.270081 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fe952b2e-c0d5-4420-a5ba-7128c73c6b3e-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-bhs52\" (UID: \"fe952b2e-c0d5-4420-a5ba-7128c73c6b3e\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-bhs52" Jan 30 12:13:31 
crc kubenswrapper[4703]: I0130 12:13:31.270115 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/2bb59434-7622-4640-bb0c-28839fa5405c-reloader\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.270182 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/2bb59434-7622-4640-bb0c-28839fa5405c-frr-startup\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.270218 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/e0696463-355f-411b-89b5-9a0bd3211101-metallb-excludel2\") pod \"speaker-c59pj\" (UID: \"e0696463-355f-411b-89b5-9a0bd3211101\") " pod="metallb-system/speaker-c59pj" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.270273 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/2bb59434-7622-4640-bb0c-28839fa5405c-frr-sockets\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.270321 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e0696463-355f-411b-89b5-9a0bd3211101-memberlist\") pod \"speaker-c59pj\" (UID: \"e0696463-355f-411b-89b5-9a0bd3211101\") " pod="metallb-system/speaker-c59pj" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.270350 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/2bb59434-7622-4640-bb0c-28839fa5405c-metrics\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.270660 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2bb59434-7622-4640-bb0c-28839fa5405c-metrics-certs\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.270774 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/2bb59434-7622-4640-bb0c-28839fa5405c-frr-conf\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.270829 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e0696463-355f-411b-89b5-9a0bd3211101-metrics-certs\") pod \"speaker-c59pj\" (UID: \"e0696463-355f-411b-89b5-9a0bd3211101\") " pod="metallb-system/speaker-c59pj" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.271168 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/2bb59434-7622-4640-bb0c-28839fa5405c-metrics\") pod \"frr-k8s-krzld\" (UID: 
\"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.271204 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-mqw8c"] Jan 30 12:13:31 crc kubenswrapper[4703]: E0130 12:13:31.271600 4703 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.271664 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/2bb59434-7622-4640-bb0c-28839fa5405c-frr-conf\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: E0130 12:13:31.271803 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2bb59434-7622-4640-bb0c-28839fa5405c-metrics-certs podName:2bb59434-7622-4640-bb0c-28839fa5405c nodeName:}" failed. No retries permitted until 2026-01-30 12:13:31.771718768 +0000 UTC m=+1047.549540422 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2bb59434-7622-4640-bb0c-28839fa5405c-metrics-certs") pod "frr-k8s-krzld" (UID: "2bb59434-7622-4640-bb0c-28839fa5405c") : secret "frr-k8s-certs-secret" not found Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.271923 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/2bb59434-7622-4640-bb0c-28839fa5405c-reloader\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.272628 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/2bb59434-7622-4640-bb0c-28839fa5405c-frr-sockets\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.272861 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/2bb59434-7622-4640-bb0c-28839fa5405c-frr-startup\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.298355 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8h5vp\" (UniqueName: \"kubernetes.io/projected/2bb59434-7622-4640-bb0c-28839fa5405c-kube-api-access-8h5vp\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.298992 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6jqf\" (UniqueName: \"kubernetes.io/projected/fe952b2e-c0d5-4420-a5ba-7128c73c6b3e-kube-api-access-h6jqf\") pod \"frr-k8s-webhook-server-7df86c4f6c-bhs52\" (UID: \"fe952b2e-c0d5-4420-a5ba-7128c73c6b3e\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-bhs52" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.309812 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fe952b2e-c0d5-4420-a5ba-7128c73c6b3e-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-bhs52\" (UID: 
\"fe952b2e-c0d5-4420-a5ba-7128c73c6b3e\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-bhs52" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.372740 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6m58j\" (UniqueName: \"kubernetes.io/projected/a8fcb989-a426-48fb-bf36-bf97b534f004-kube-api-access-6m58j\") pod \"controller-6968d8fdc4-mqw8c\" (UID: \"a8fcb989-a426-48fb-bf36-bf97b534f004\") " pod="metallb-system/controller-6968d8fdc4-mqw8c" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.372860 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e0696463-355f-411b-89b5-9a0bd3211101-memberlist\") pod \"speaker-c59pj\" (UID: \"e0696463-355f-411b-89b5-9a0bd3211101\") " pod="metallb-system/speaker-c59pj" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.372960 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a8fcb989-a426-48fb-bf36-bf97b534f004-cert\") pod \"controller-6968d8fdc4-mqw8c\" (UID: \"a8fcb989-a426-48fb-bf36-bf97b534f004\") " pod="metallb-system/controller-6968d8fdc4-mqw8c" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.373002 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e0696463-355f-411b-89b5-9a0bd3211101-metrics-certs\") pod \"speaker-c59pj\" (UID: \"e0696463-355f-411b-89b5-9a0bd3211101\") " pod="metallb-system/speaker-c59pj" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.373064 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8mkc\" (UniqueName: \"kubernetes.io/projected/e0696463-355f-411b-89b5-9a0bd3211101-kube-api-access-d8mkc\") pod \"speaker-c59pj\" (UID: \"e0696463-355f-411b-89b5-9a0bd3211101\") " pod="metallb-system/speaker-c59pj" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.373101 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a8fcb989-a426-48fb-bf36-bf97b534f004-metrics-certs\") pod \"controller-6968d8fdc4-mqw8c\" (UID: \"a8fcb989-a426-48fb-bf36-bf97b534f004\") " pod="metallb-system/controller-6968d8fdc4-mqw8c" Jan 30 12:13:31 crc kubenswrapper[4703]: E0130 12:13:31.373102 4703 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 30 12:13:31 crc kubenswrapper[4703]: E0130 12:13:31.373218 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e0696463-355f-411b-89b5-9a0bd3211101-memberlist podName:e0696463-355f-411b-89b5-9a0bd3211101 nodeName:}" failed. No retries permitted until 2026-01-30 12:13:31.873189572 +0000 UTC m=+1047.651011226 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/e0696463-355f-411b-89b5-9a0bd3211101-memberlist") pod "speaker-c59pj" (UID: "e0696463-355f-411b-89b5-9a0bd3211101") : secret "metallb-memberlist" not found Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.373251 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/e0696463-355f-411b-89b5-9a0bd3211101-metallb-excludel2\") pod \"speaker-c59pj\" (UID: \"e0696463-355f-411b-89b5-9a0bd3211101\") " pod="metallb-system/speaker-c59pj" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.374434 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/e0696463-355f-411b-89b5-9a0bd3211101-metallb-excludel2\") pod \"speaker-c59pj\" (UID: \"e0696463-355f-411b-89b5-9a0bd3211101\") " pod="metallb-system/speaker-c59pj" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.379912 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e0696463-355f-411b-89b5-9a0bd3211101-metrics-certs\") pod \"speaker-c59pj\" (UID: \"e0696463-355f-411b-89b5-9a0bd3211101\") " pod="metallb-system/speaker-c59pj" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.395843 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8mkc\" (UniqueName: \"kubernetes.io/projected/e0696463-355f-411b-89b5-9a0bd3211101-kube-api-access-d8mkc\") pod \"speaker-c59pj\" (UID: \"e0696463-355f-411b-89b5-9a0bd3211101\") " pod="metallb-system/speaker-c59pj" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.449741 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-bhs52" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.475712 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a8fcb989-a426-48fb-bf36-bf97b534f004-metrics-certs\") pod \"controller-6968d8fdc4-mqw8c\" (UID: \"a8fcb989-a426-48fb-bf36-bf97b534f004\") " pod="metallb-system/controller-6968d8fdc4-mqw8c" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.475871 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6m58j\" (UniqueName: \"kubernetes.io/projected/a8fcb989-a426-48fb-bf36-bf97b534f004-kube-api-access-6m58j\") pod \"controller-6968d8fdc4-mqw8c\" (UID: \"a8fcb989-a426-48fb-bf36-bf97b534f004\") " pod="metallb-system/controller-6968d8fdc4-mqw8c" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.475985 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a8fcb989-a426-48fb-bf36-bf97b534f004-cert\") pod \"controller-6968d8fdc4-mqw8c\" (UID: \"a8fcb989-a426-48fb-bf36-bf97b534f004\") " pod="metallb-system/controller-6968d8fdc4-mqw8c" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.480396 4703 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.480954 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a8fcb989-a426-48fb-bf36-bf97b534f004-metrics-certs\") pod \"controller-6968d8fdc4-mqw8c\" (UID: \"a8fcb989-a426-48fb-bf36-bf97b534f004\") " pod="metallb-system/controller-6968d8fdc4-mqw8c" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.578279 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a8fcb989-a426-48fb-bf36-bf97b534f004-cert\") pod \"controller-6968d8fdc4-mqw8c\" (UID: \"a8fcb989-a426-48fb-bf36-bf97b534f004\") " pod="metallb-system/controller-6968d8fdc4-mqw8c" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.585446 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6m58j\" (UniqueName: \"kubernetes.io/projected/a8fcb989-a426-48fb-bf36-bf97b534f004-kube-api-access-6m58j\") pod \"controller-6968d8fdc4-mqw8c\" (UID: \"a8fcb989-a426-48fb-bf36-bf97b534f004\") " pod="metallb-system/controller-6968d8fdc4-mqw8c" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.633736 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-mqw8c" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.784168 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-bhs52"] Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.785017 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2bb59434-7622-4640-bb0c-28839fa5405c-metrics-certs\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.789560 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2bb59434-7622-4640-bb0c-28839fa5405c-metrics-certs\") pod \"frr-k8s-krzld\" (UID: \"2bb59434-7622-4640-bb0c-28839fa5405c\") " pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:31 crc kubenswrapper[4703]: I0130 12:13:31.886308 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e0696463-355f-411b-89b5-9a0bd3211101-memberlist\") pod \"speaker-c59pj\" (UID: \"e0696463-355f-411b-89b5-9a0bd3211101\") " pod="metallb-system/speaker-c59pj" Jan 30 12:13:31 crc kubenswrapper[4703]: E0130 12:13:31.886982 4703 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 30 12:13:31 crc kubenswrapper[4703]: E0130 12:13:31.887104 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e0696463-355f-411b-89b5-9a0bd3211101-memberlist podName:e0696463-355f-411b-89b5-9a0bd3211101 nodeName:}" failed. No retries permitted until 2026-01-30 12:13:32.88707674 +0000 UTC m=+1048.664898394 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/e0696463-355f-411b-89b5-9a0bd3211101-memberlist") pod "speaker-c59pj" (UID: "e0696463-355f-411b-89b5-9a0bd3211101") : secret "metallb-memberlist" not found Jan 30 12:13:32 crc kubenswrapper[4703]: I0130 12:13:32.033742 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:32 crc kubenswrapper[4703]: I0130 12:13:32.073727 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-mqw8c"] Jan 30 12:13:32 crc kubenswrapper[4703]: W0130 12:13:32.081066 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda8fcb989_a426_48fb_bf36_bf97b534f004.slice/crio-c1326bd92e1b592dad724ce99f8bdf4cf80b2c66b332e653f8446411f41af388 WatchSource:0}: Error finding container c1326bd92e1b592dad724ce99f8bdf4cf80b2c66b332e653f8446411f41af388: Status 404 returned error can't find the container with id c1326bd92e1b592dad724ce99f8bdf4cf80b2c66b332e653f8446411f41af388 Jan 30 12:13:32 crc kubenswrapper[4703]: I0130 12:13:32.308697 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-mqw8c" event={"ID":"a8fcb989-a426-48fb-bf36-bf97b534f004","Type":"ContainerStarted","Data":"371f40c4e9b59b4be67caf9e36043d3fe2276651fec38ef7571d49c2fa4d6a55"} Jan 30 12:13:32 crc kubenswrapper[4703]: I0130 12:13:32.308741 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-mqw8c" event={"ID":"a8fcb989-a426-48fb-bf36-bf97b534f004","Type":"ContainerStarted","Data":"c1326bd92e1b592dad724ce99f8bdf4cf80b2c66b332e653f8446411f41af388"} Jan 30 12:13:32 crc kubenswrapper[4703]: I0130 12:13:32.309637 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-krzld" event={"ID":"2bb59434-7622-4640-bb0c-28839fa5405c","Type":"ContainerStarted","Data":"3a97e86a48d0a8b8b25819447b972b7d9b5f07da23a8203883c8cd47a2c01bea"} Jan 30 12:13:32 crc kubenswrapper[4703]: I0130 12:13:32.310623 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-bhs52" event={"ID":"fe952b2e-c0d5-4420-a5ba-7128c73c6b3e","Type":"ContainerStarted","Data":"27cfacf211add559c0186f5b77dfcab438fbe72bd617c2b8c8819375a8590e65"} Jan 30 12:13:32 crc kubenswrapper[4703]: I0130 12:13:32.903825 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e0696463-355f-411b-89b5-9a0bd3211101-memberlist\") pod \"speaker-c59pj\" (UID: \"e0696463-355f-411b-89b5-9a0bd3211101\") " pod="metallb-system/speaker-c59pj" Jan 30 12:13:32 crc kubenswrapper[4703]: I0130 12:13:32.911227 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e0696463-355f-411b-89b5-9a0bd3211101-memberlist\") pod \"speaker-c59pj\" (UID: \"e0696463-355f-411b-89b5-9a0bd3211101\") " pod="metallb-system/speaker-c59pj" Jan 30 12:13:33 crc kubenswrapper[4703]: I0130 12:13:33.046240 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-c59pj" Jan 30 12:13:33 crc kubenswrapper[4703]: I0130 12:13:33.357191 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-mqw8c" event={"ID":"a8fcb989-a426-48fb-bf36-bf97b534f004","Type":"ContainerStarted","Data":"71f8d6a93f1e07dd391c80461ce04045fe0d4b7073db250edb0cdca079ad50f9"} Jan 30 12:13:33 crc kubenswrapper[4703]: I0130 12:13:33.358312 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-mqw8c" Jan 30 12:13:33 crc kubenswrapper[4703]: I0130 12:13:33.366434 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-c59pj" event={"ID":"e0696463-355f-411b-89b5-9a0bd3211101","Type":"ContainerStarted","Data":"99bcdcb3d26825cf212e74409828e7fa381e40e3a1be6877780ff75d691e2152"} Jan 30 12:13:33 crc kubenswrapper[4703]: I0130 12:13:33.411374 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-mqw8c" podStartSLOduration=2.411341126 podStartE2EDuration="2.411341126s" podCreationTimestamp="2026-01-30 12:13:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:13:33.40148653 +0000 UTC m=+1049.179308184" watchObservedRunningTime="2026-01-30 12:13:33.411341126 +0000 UTC m=+1049.189162780" Jan 30 12:13:34 crc kubenswrapper[4703]: I0130 12:13:34.381632 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-c59pj" event={"ID":"e0696463-355f-411b-89b5-9a0bd3211101","Type":"ContainerStarted","Data":"15f0acf8e510fc247c62c9b9d3b7b78ddddc024ba4e2157020dd0ece0015f7f9"} Jan 30 12:13:34 crc kubenswrapper[4703]: I0130 12:13:34.382230 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-c59pj" event={"ID":"e0696463-355f-411b-89b5-9a0bd3211101","Type":"ContainerStarted","Data":"e257044e08697d7d75118cb803d6a2760d54d7d568c164e59aac807ddddeef5c"} Jan 30 12:13:34 crc kubenswrapper[4703]: I0130 12:13:34.422756 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-c59pj" podStartSLOduration=3.42270507 podStartE2EDuration="3.42270507s" podCreationTimestamp="2026-01-30 12:13:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:13:34.419110563 +0000 UTC m=+1050.196932217" watchObservedRunningTime="2026-01-30 12:13:34.42270507 +0000 UTC m=+1050.200526724" Jan 30 12:13:35 crc kubenswrapper[4703]: I0130 12:13:35.391257 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-c59pj" Jan 30 12:13:41 crc kubenswrapper[4703]: I0130 12:13:41.513384 4703 generic.go:334] "Generic (PLEG): container finished" podID="2bb59434-7622-4640-bb0c-28839fa5405c" containerID="d71fdc314471b765d609b674e94fe8f42587912c09b7d715e31cd94669f39a1f" exitCode=0 Jan 30 12:13:41 crc kubenswrapper[4703]: I0130 12:13:41.513475 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-krzld" event={"ID":"2bb59434-7622-4640-bb0c-28839fa5405c","Type":"ContainerDied","Data":"d71fdc314471b765d609b674e94fe8f42587912c09b7d715e31cd94669f39a1f"} Jan 30 12:13:41 crc kubenswrapper[4703]: I0130 12:13:41.517610 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-bhs52" 
event={"ID":"fe952b2e-c0d5-4420-a5ba-7128c73c6b3e","Type":"ContainerStarted","Data":"5fb44744a92a4bd43248cc2cecc737fcd5b4bab08648b88f76a4239626c4b01a"} Jan 30 12:13:41 crc kubenswrapper[4703]: I0130 12:13:41.517797 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-bhs52" Jan 30 12:13:41 crc kubenswrapper[4703]: I0130 12:13:41.567677 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-bhs52" podStartSLOduration=1.336514972 podStartE2EDuration="10.56764785s" podCreationTimestamp="2026-01-30 12:13:31 +0000 UTC" firstStartedPulling="2026-01-30 12:13:31.801299339 +0000 UTC m=+1047.579120993" lastFinishedPulling="2026-01-30 12:13:41.032432197 +0000 UTC m=+1056.810253871" observedRunningTime="2026-01-30 12:13:41.559902151 +0000 UTC m=+1057.337723825" watchObservedRunningTime="2026-01-30 12:13:41.56764785 +0000 UTC m=+1057.345469504" Jan 30 12:13:42 crc kubenswrapper[4703]: I0130 12:13:42.527544 4703 generic.go:334] "Generic (PLEG): container finished" podID="2bb59434-7622-4640-bb0c-28839fa5405c" containerID="708866639e83891185c0b1efe03c33913cee31e1e68d3a9692e3d5d8dd2ceb09" exitCode=0 Jan 30 12:13:42 crc kubenswrapper[4703]: I0130 12:13:42.527683 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-krzld" event={"ID":"2bb59434-7622-4640-bb0c-28839fa5405c","Type":"ContainerDied","Data":"708866639e83891185c0b1efe03c33913cee31e1e68d3a9692e3d5d8dd2ceb09"} Jan 30 12:13:42 crc kubenswrapper[4703]: I0130 12:13:42.822852 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:13:42 crc kubenswrapper[4703]: I0130 12:13:42.822945 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:13:43 crc kubenswrapper[4703]: I0130 12:13:43.052507 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-c59pj" Jan 30 12:13:43 crc kubenswrapper[4703]: I0130 12:13:43.540887 4703 generic.go:334] "Generic (PLEG): container finished" podID="2bb59434-7622-4640-bb0c-28839fa5405c" containerID="a980c479906e0135b4c885b3e8f5cf261dcd2e454abe6b5f71b0d154147a3a44" exitCode=0 Jan 30 12:13:43 crc kubenswrapper[4703]: I0130 12:13:43.540998 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-krzld" event={"ID":"2bb59434-7622-4640-bb0c-28839fa5405c","Type":"ContainerDied","Data":"a980c479906e0135b4c885b3e8f5cf261dcd2e454abe6b5f71b0d154147a3a44"} Jan 30 12:13:44 crc kubenswrapper[4703]: I0130 12:13:44.559353 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-krzld" event={"ID":"2bb59434-7622-4640-bb0c-28839fa5405c","Type":"ContainerStarted","Data":"0e48de14b51e5ff5cf26e959fbc2e9a9cd855922e7aef14910d7cc497755f6ad"} Jan 30 12:13:44 crc kubenswrapper[4703]: I0130 12:13:44.559843 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-krzld" 
event={"ID":"2bb59434-7622-4640-bb0c-28839fa5405c","Type":"ContainerStarted","Data":"63a98d7fd80e5248c7dbf80e7d09a8de874180b21850ba7f0f7f48e5f0329a51"} Jan 30 12:13:44 crc kubenswrapper[4703]: I0130 12:13:44.559858 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-krzld" event={"ID":"2bb59434-7622-4640-bb0c-28839fa5405c","Type":"ContainerStarted","Data":"1c4107d40e763662fdf891002c7824dbea7d92a8497d0e90fc907b53c987a384"} Jan 30 12:13:44 crc kubenswrapper[4703]: I0130 12:13:44.559868 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-krzld" event={"ID":"2bb59434-7622-4640-bb0c-28839fa5405c","Type":"ContainerStarted","Data":"27c684a82991220a7db1a0692953e711c836961066af013180a5c8ec578d2434"} Jan 30 12:13:44 crc kubenswrapper[4703]: I0130 12:13:44.559882 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-krzld" event={"ID":"2bb59434-7622-4640-bb0c-28839fa5405c","Type":"ContainerStarted","Data":"27029bf0b263387fe20adcf2c1313493ec8d8cb40cb68b20c9df3dbd60b7f581"} Jan 30 12:13:45 crc kubenswrapper[4703]: I0130 12:13:45.573833 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-krzld" event={"ID":"2bb59434-7622-4640-bb0c-28839fa5405c","Type":"ContainerStarted","Data":"bad761be33a135a88747e4b34653f9f8fb032c776182599f3d6498936c225ad6"} Jan 30 12:13:45 crc kubenswrapper[4703]: I0130 12:13:45.574032 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:45 crc kubenswrapper[4703]: I0130 12:13:45.610201 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-krzld" podStartSLOduration=5.744910159 podStartE2EDuration="14.610178157s" podCreationTimestamp="2026-01-30 12:13:31 +0000 UTC" firstStartedPulling="2026-01-30 12:13:32.193951391 +0000 UTC m=+1047.971773035" lastFinishedPulling="2026-01-30 12:13:41.059219379 +0000 UTC m=+1056.837041033" observedRunningTime="2026-01-30 12:13:45.604421551 +0000 UTC m=+1061.382243215" watchObservedRunningTime="2026-01-30 12:13:45.610178157 +0000 UTC m=+1061.387999811" Jan 30 12:13:45 crc kubenswrapper[4703]: I0130 12:13:45.900893 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-s6sct"] Jan 30 12:13:45 crc kubenswrapper[4703]: I0130 12:13:45.902381 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-s6sct" Jan 30 12:13:45 crc kubenswrapper[4703]: I0130 12:13:45.905475 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Jan 30 12:13:45 crc kubenswrapper[4703]: I0130 12:13:45.905814 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-wx8kr" Jan 30 12:13:45 crc kubenswrapper[4703]: I0130 12:13:45.908429 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Jan 30 12:13:45 crc kubenswrapper[4703]: I0130 12:13:45.964549 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-s6sct"] Jan 30 12:13:46 crc kubenswrapper[4703]: I0130 12:13:46.046927 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g697l\" (UniqueName: \"kubernetes.io/projected/2ae816c6-42aa-4be8-83fe-3bec11995856-kube-api-access-g697l\") pod \"openstack-operator-index-s6sct\" (UID: \"2ae816c6-42aa-4be8-83fe-3bec11995856\") " pod="openstack-operators/openstack-operator-index-s6sct" Jan 30 12:13:46 crc kubenswrapper[4703]: I0130 12:13:46.148452 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g697l\" (UniqueName: \"kubernetes.io/projected/2ae816c6-42aa-4be8-83fe-3bec11995856-kube-api-access-g697l\") pod \"openstack-operator-index-s6sct\" (UID: \"2ae816c6-42aa-4be8-83fe-3bec11995856\") " pod="openstack-operators/openstack-operator-index-s6sct" Jan 30 12:13:46 crc kubenswrapper[4703]: I0130 12:13:46.171796 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g697l\" (UniqueName: \"kubernetes.io/projected/2ae816c6-42aa-4be8-83fe-3bec11995856-kube-api-access-g697l\") pod \"openstack-operator-index-s6sct\" (UID: \"2ae816c6-42aa-4be8-83fe-3bec11995856\") " pod="openstack-operators/openstack-operator-index-s6sct" Jan 30 12:13:46 crc kubenswrapper[4703]: I0130 12:13:46.263453 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-s6sct" Jan 30 12:13:46 crc kubenswrapper[4703]: I0130 12:13:46.712336 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-s6sct"] Jan 30 12:13:46 crc kubenswrapper[4703]: W0130 12:13:46.719478 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2ae816c6_42aa_4be8_83fe_3bec11995856.slice/crio-59da3ff2026d0504fa41d3d26eb2f4a49dca1cf42c9f2a73670c59ae2a666b2d WatchSource:0}: Error finding container 59da3ff2026d0504fa41d3d26eb2f4a49dca1cf42c9f2a73670c59ae2a666b2d: Status 404 returned error can't find the container with id 59da3ff2026d0504fa41d3d26eb2f4a49dca1cf42c9f2a73670c59ae2a666b2d Jan 30 12:13:47 crc kubenswrapper[4703]: I0130 12:13:47.035944 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:47 crc kubenswrapper[4703]: I0130 12:13:47.078854 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-krzld" Jan 30 12:13:47 crc kubenswrapper[4703]: I0130 12:13:47.594144 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-s6sct" event={"ID":"2ae816c6-42aa-4be8-83fe-3bec11995856","Type":"ContainerStarted","Data":"59da3ff2026d0504fa41d3d26eb2f4a49dca1cf42c9f2a73670c59ae2a666b2d"} Jan 30 12:13:48 crc kubenswrapper[4703]: I0130 12:13:48.474270 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-s6sct"] Jan 30 12:13:49 crc kubenswrapper[4703]: I0130 12:13:49.082030 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-nfflc"] Jan 30 12:13:49 crc kubenswrapper[4703]: I0130 12:13:49.083970 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-nfflc" Jan 30 12:13:49 crc kubenswrapper[4703]: I0130 12:13:49.097537 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-nfflc"] Jan 30 12:13:49 crc kubenswrapper[4703]: I0130 12:13:49.213026 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7j8z9\" (UniqueName: \"kubernetes.io/projected/2dd61d9e-b40e-44fe-84bc-9be8323b133a-kube-api-access-7j8z9\") pod \"openstack-operator-index-nfflc\" (UID: \"2dd61d9e-b40e-44fe-84bc-9be8323b133a\") " pod="openstack-operators/openstack-operator-index-nfflc" Jan 30 12:13:49 crc kubenswrapper[4703]: I0130 12:13:49.314372 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7j8z9\" (UniqueName: \"kubernetes.io/projected/2dd61d9e-b40e-44fe-84bc-9be8323b133a-kube-api-access-7j8z9\") pod \"openstack-operator-index-nfflc\" (UID: \"2dd61d9e-b40e-44fe-84bc-9be8323b133a\") " pod="openstack-operators/openstack-operator-index-nfflc" Jan 30 12:13:49 crc kubenswrapper[4703]: I0130 12:13:49.340802 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7j8z9\" (UniqueName: \"kubernetes.io/projected/2dd61d9e-b40e-44fe-84bc-9be8323b133a-kube-api-access-7j8z9\") pod \"openstack-operator-index-nfflc\" (UID: \"2dd61d9e-b40e-44fe-84bc-9be8323b133a\") " pod="openstack-operators/openstack-operator-index-nfflc" Jan 30 12:13:49 crc kubenswrapper[4703]: I0130 12:13:49.413805 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-nfflc" Jan 30 12:13:49 crc kubenswrapper[4703]: I0130 12:13:49.837068 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-nfflc"] Jan 30 12:13:50 crc kubenswrapper[4703]: I0130 12:13:50.617833 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-s6sct" event={"ID":"2ae816c6-42aa-4be8-83fe-3bec11995856","Type":"ContainerStarted","Data":"edc4419725c16ae2c5807f18c8e2a6ef751146bf29fcc6266fdf4490145f0776"} Jan 30 12:13:50 crc kubenswrapper[4703]: I0130 12:13:50.617953 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-s6sct" podUID="2ae816c6-42aa-4be8-83fe-3bec11995856" containerName="registry-server" containerID="cri-o://edc4419725c16ae2c5807f18c8e2a6ef751146bf29fcc6266fdf4490145f0776" gracePeriod=2 Jan 30 12:13:50 crc kubenswrapper[4703]: I0130 12:13:50.623391 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-nfflc" event={"ID":"2dd61d9e-b40e-44fe-84bc-9be8323b133a","Type":"ContainerStarted","Data":"bd6b73f1c29f15749aa343c4bc6e6695a64895700ffbc073dfa57e377f14d587"} Jan 30 12:13:50 crc kubenswrapper[4703]: I0130 12:13:50.623555 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-nfflc" event={"ID":"2dd61d9e-b40e-44fe-84bc-9be8323b133a","Type":"ContainerStarted","Data":"8aea1c8dbbe7585059fe177412926132ebd6e656ae49186fdffd7b514d40fab5"} Jan 30 12:13:50 crc kubenswrapper[4703]: I0130 12:13:50.651110 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-s6sct" podStartSLOduration=2.698865373 podStartE2EDuration="5.651083158s" podCreationTimestamp="2026-01-30 12:13:45 +0000 
UTC" firstStartedPulling="2026-01-30 12:13:46.72206518 +0000 UTC m=+1062.499886834" lastFinishedPulling="2026-01-30 12:13:49.674282965 +0000 UTC m=+1065.452104619" observedRunningTime="2026-01-30 12:13:50.639097415 +0000 UTC m=+1066.416919099" watchObservedRunningTime="2026-01-30 12:13:50.651083158 +0000 UTC m=+1066.428904812" Jan 30 12:13:50 crc kubenswrapper[4703]: I0130 12:13:50.661741 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-nfflc" podStartSLOduration=1.574312039 podStartE2EDuration="1.661703584s" podCreationTimestamp="2026-01-30 12:13:49 +0000 UTC" firstStartedPulling="2026-01-30 12:13:49.851770108 +0000 UTC m=+1065.629591772" lastFinishedPulling="2026-01-30 12:13:49.939161663 +0000 UTC m=+1065.716983317" observedRunningTime="2026-01-30 12:13:50.657797549 +0000 UTC m=+1066.435619203" watchObservedRunningTime="2026-01-30 12:13:50.661703584 +0000 UTC m=+1066.439525238" Jan 30 12:13:51 crc kubenswrapper[4703]: I0130 12:13:51.001319 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-s6sct" Jan 30 12:13:51 crc kubenswrapper[4703]: I0130 12:13:51.147574 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g697l\" (UniqueName: \"kubernetes.io/projected/2ae816c6-42aa-4be8-83fe-3bec11995856-kube-api-access-g697l\") pod \"2ae816c6-42aa-4be8-83fe-3bec11995856\" (UID: \"2ae816c6-42aa-4be8-83fe-3bec11995856\") " Jan 30 12:13:51 crc kubenswrapper[4703]: I0130 12:13:51.156175 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ae816c6-42aa-4be8-83fe-3bec11995856-kube-api-access-g697l" (OuterVolumeSpecName: "kube-api-access-g697l") pod "2ae816c6-42aa-4be8-83fe-3bec11995856" (UID: "2ae816c6-42aa-4be8-83fe-3bec11995856"). InnerVolumeSpecName "kube-api-access-g697l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:13:51 crc kubenswrapper[4703]: I0130 12:13:51.250050 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g697l\" (UniqueName: \"kubernetes.io/projected/2ae816c6-42aa-4be8-83fe-3bec11995856-kube-api-access-g697l\") on node \"crc\" DevicePath \"\"" Jan 30 12:13:51 crc kubenswrapper[4703]: I0130 12:13:51.456245 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-bhs52" Jan 30 12:13:51 crc kubenswrapper[4703]: I0130 12:13:51.633586 4703 generic.go:334] "Generic (PLEG): container finished" podID="2ae816c6-42aa-4be8-83fe-3bec11995856" containerID="edc4419725c16ae2c5807f18c8e2a6ef751146bf29fcc6266fdf4490145f0776" exitCode=0 Jan 30 12:13:51 crc kubenswrapper[4703]: I0130 12:13:51.633682 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-s6sct" event={"ID":"2ae816c6-42aa-4be8-83fe-3bec11995856","Type":"ContainerDied","Data":"edc4419725c16ae2c5807f18c8e2a6ef751146bf29fcc6266fdf4490145f0776"} Jan 30 12:13:51 crc kubenswrapper[4703]: I0130 12:13:51.633721 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-s6sct" Jan 30 12:13:51 crc kubenswrapper[4703]: I0130 12:13:51.633772 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-s6sct" event={"ID":"2ae816c6-42aa-4be8-83fe-3bec11995856","Type":"ContainerDied","Data":"59da3ff2026d0504fa41d3d26eb2f4a49dca1cf42c9f2a73670c59ae2a666b2d"} Jan 30 12:13:51 crc kubenswrapper[4703]: I0130 12:13:51.633795 4703 scope.go:117] "RemoveContainer" containerID="edc4419725c16ae2c5807f18c8e2a6ef751146bf29fcc6266fdf4490145f0776" Jan 30 12:13:51 crc kubenswrapper[4703]: I0130 12:13:51.640958 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-mqw8c" Jan 30 12:13:51 crc kubenswrapper[4703]: I0130 12:13:51.663564 4703 scope.go:117] "RemoveContainer" containerID="edc4419725c16ae2c5807f18c8e2a6ef751146bf29fcc6266fdf4490145f0776" Jan 30 12:13:51 crc kubenswrapper[4703]: E0130 12:13:51.664798 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edc4419725c16ae2c5807f18c8e2a6ef751146bf29fcc6266fdf4490145f0776\": container with ID starting with edc4419725c16ae2c5807f18c8e2a6ef751146bf29fcc6266fdf4490145f0776 not found: ID does not exist" containerID="edc4419725c16ae2c5807f18c8e2a6ef751146bf29fcc6266fdf4490145f0776" Jan 30 12:13:51 crc kubenswrapper[4703]: I0130 12:13:51.664921 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edc4419725c16ae2c5807f18c8e2a6ef751146bf29fcc6266fdf4490145f0776"} err="failed to get container status \"edc4419725c16ae2c5807f18c8e2a6ef751146bf29fcc6266fdf4490145f0776\": rpc error: code = NotFound desc = could not find container \"edc4419725c16ae2c5807f18c8e2a6ef751146bf29fcc6266fdf4490145f0776\": container with ID starting with edc4419725c16ae2c5807f18c8e2a6ef751146bf29fcc6266fdf4490145f0776 not found: ID does not exist" Jan 30 12:13:51 crc kubenswrapper[4703]: I0130 12:13:51.688192 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-s6sct"] Jan 30 12:13:51 crc kubenswrapper[4703]: I0130 12:13:51.693149 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-s6sct"] Jan 30 12:13:53 crc kubenswrapper[4703]: I0130 12:13:53.095744 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ae816c6-42aa-4be8-83fe-3bec11995856" path="/var/lib/kubelet/pods/2ae816c6-42aa-4be8-83fe-3bec11995856/volumes" Jan 30 12:13:59 crc kubenswrapper[4703]: I0130 12:13:59.414227 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-nfflc" Jan 30 12:13:59 crc kubenswrapper[4703]: I0130 12:13:59.415321 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-nfflc" Jan 30 12:13:59 crc kubenswrapper[4703]: I0130 12:13:59.451229 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-nfflc" Jan 30 12:13:59 crc kubenswrapper[4703]: I0130 12:13:59.718874 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-nfflc" Jan 30 12:14:02 crc kubenswrapper[4703]: I0130 12:14:02.037675 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-krzld" 
Jan 30 12:14:04 crc kubenswrapper[4703]: I0130 12:14:04.159090 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc"] Jan 30 12:14:04 crc kubenswrapper[4703]: E0130 12:14:04.159790 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ae816c6-42aa-4be8-83fe-3bec11995856" containerName="registry-server" Jan 30 12:14:04 crc kubenswrapper[4703]: I0130 12:14:04.159810 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ae816c6-42aa-4be8-83fe-3bec11995856" containerName="registry-server" Jan 30 12:14:04 crc kubenswrapper[4703]: I0130 12:14:04.160020 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ae816c6-42aa-4be8-83fe-3bec11995856" containerName="registry-server" Jan 30 12:14:04 crc kubenswrapper[4703]: I0130 12:14:04.161284 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc" Jan 30 12:14:04 crc kubenswrapper[4703]: I0130 12:14:04.165058 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-9jmj2" Jan 30 12:14:04 crc kubenswrapper[4703]: I0130 12:14:04.171784 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc"] Jan 30 12:14:04 crc kubenswrapper[4703]: I0130 12:14:04.259869 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d0ed19e-733e-4b63-923f-426ecea8ffe1-util\") pod \"ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc\" (UID: \"0d0ed19e-733e-4b63-923f-426ecea8ffe1\") " pod="openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc" Jan 30 12:14:04 crc kubenswrapper[4703]: I0130 12:14:04.259990 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qzzk\" (UniqueName: \"kubernetes.io/projected/0d0ed19e-733e-4b63-923f-426ecea8ffe1-kube-api-access-2qzzk\") pod \"ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc\" (UID: \"0d0ed19e-733e-4b63-923f-426ecea8ffe1\") " pod="openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc" Jan 30 12:14:04 crc kubenswrapper[4703]: I0130 12:14:04.260039 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d0ed19e-733e-4b63-923f-426ecea8ffe1-bundle\") pod \"ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc\" (UID: \"0d0ed19e-733e-4b63-923f-426ecea8ffe1\") " pod="openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc" Jan 30 12:14:04 crc kubenswrapper[4703]: I0130 12:14:04.361722 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d0ed19e-733e-4b63-923f-426ecea8ffe1-util\") pod \"ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc\" (UID: \"0d0ed19e-733e-4b63-923f-426ecea8ffe1\") " pod="openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc" Jan 30 12:14:04 crc kubenswrapper[4703]: I0130 12:14:04.361809 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qzzk\" (UniqueName: 
\"kubernetes.io/projected/0d0ed19e-733e-4b63-923f-426ecea8ffe1-kube-api-access-2qzzk\") pod \"ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc\" (UID: \"0d0ed19e-733e-4b63-923f-426ecea8ffe1\") " pod="openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc" Jan 30 12:14:04 crc kubenswrapper[4703]: I0130 12:14:04.361835 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d0ed19e-733e-4b63-923f-426ecea8ffe1-bundle\") pod \"ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc\" (UID: \"0d0ed19e-733e-4b63-923f-426ecea8ffe1\") " pod="openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc" Jan 30 12:14:04 crc kubenswrapper[4703]: I0130 12:14:04.362316 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d0ed19e-733e-4b63-923f-426ecea8ffe1-util\") pod \"ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc\" (UID: \"0d0ed19e-733e-4b63-923f-426ecea8ffe1\") " pod="openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc" Jan 30 12:14:04 crc kubenswrapper[4703]: I0130 12:14:04.362339 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d0ed19e-733e-4b63-923f-426ecea8ffe1-bundle\") pod \"ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc\" (UID: \"0d0ed19e-733e-4b63-923f-426ecea8ffe1\") " pod="openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc" Jan 30 12:14:04 crc kubenswrapper[4703]: I0130 12:14:04.386365 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qzzk\" (UniqueName: \"kubernetes.io/projected/0d0ed19e-733e-4b63-923f-426ecea8ffe1-kube-api-access-2qzzk\") pod \"ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc\" (UID: \"0d0ed19e-733e-4b63-923f-426ecea8ffe1\") " pod="openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc" Jan 30 12:14:04 crc kubenswrapper[4703]: I0130 12:14:04.479676 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc" Jan 30 12:14:04 crc kubenswrapper[4703]: I0130 12:14:04.923205 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc"] Jan 30 12:14:05 crc kubenswrapper[4703]: I0130 12:14:05.735441 4703 generic.go:334] "Generic (PLEG): container finished" podID="0d0ed19e-733e-4b63-923f-426ecea8ffe1" containerID="6c05008949abe902189a2b2ea6623cdb5232aaebff25c0c378b32a907f1623fd" exitCode=0 Jan 30 12:14:05 crc kubenswrapper[4703]: I0130 12:14:05.735529 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc" event={"ID":"0d0ed19e-733e-4b63-923f-426ecea8ffe1","Type":"ContainerDied","Data":"6c05008949abe902189a2b2ea6623cdb5232aaebff25c0c378b32a907f1623fd"} Jan 30 12:14:05 crc kubenswrapper[4703]: I0130 12:14:05.736031 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc" event={"ID":"0d0ed19e-733e-4b63-923f-426ecea8ffe1","Type":"ContainerStarted","Data":"918d326065a2ccbfde91180b59b3f1c598f3223d4d3bf2eae9ef2ee46c110f5c"} Jan 30 12:14:06 crc kubenswrapper[4703]: I0130 12:14:06.746086 4703 generic.go:334] "Generic (PLEG): container finished" podID="0d0ed19e-733e-4b63-923f-426ecea8ffe1" containerID="7fb5d218e11fee954e2d88aaa0cf524ee5a9679766fa835890e4449a723b11c2" exitCode=0 Jan 30 12:14:06 crc kubenswrapper[4703]: I0130 12:14:06.746169 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc" event={"ID":"0d0ed19e-733e-4b63-923f-426ecea8ffe1","Type":"ContainerDied","Data":"7fb5d218e11fee954e2d88aaa0cf524ee5a9679766fa835890e4449a723b11c2"} Jan 30 12:14:07 crc kubenswrapper[4703]: I0130 12:14:07.755664 4703 generic.go:334] "Generic (PLEG): container finished" podID="0d0ed19e-733e-4b63-923f-426ecea8ffe1" containerID="113c5920f09f1301c830231f15998537d162c1d1c322968615d39c9c53eeeec4" exitCode=0 Jan 30 12:14:07 crc kubenswrapper[4703]: I0130 12:14:07.755725 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc" event={"ID":"0d0ed19e-733e-4b63-923f-426ecea8ffe1","Type":"ContainerDied","Data":"113c5920f09f1301c830231f15998537d162c1d1c322968615d39c9c53eeeec4"} Jan 30 12:14:09 crc kubenswrapper[4703]: I0130 12:14:09.034279 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc" Jan 30 12:14:09 crc kubenswrapper[4703]: I0130 12:14:09.151917 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d0ed19e-733e-4b63-923f-426ecea8ffe1-bundle\") pod \"0d0ed19e-733e-4b63-923f-426ecea8ffe1\" (UID: \"0d0ed19e-733e-4b63-923f-426ecea8ffe1\") " Jan 30 12:14:09 crc kubenswrapper[4703]: I0130 12:14:09.152552 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d0ed19e-733e-4b63-923f-426ecea8ffe1-util\") pod \"0d0ed19e-733e-4b63-923f-426ecea8ffe1\" (UID: \"0d0ed19e-733e-4b63-923f-426ecea8ffe1\") " Jan 30 12:14:09 crc kubenswrapper[4703]: I0130 12:14:09.152599 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qzzk\" (UniqueName: \"kubernetes.io/projected/0d0ed19e-733e-4b63-923f-426ecea8ffe1-kube-api-access-2qzzk\") pod \"0d0ed19e-733e-4b63-923f-426ecea8ffe1\" (UID: \"0d0ed19e-733e-4b63-923f-426ecea8ffe1\") " Jan 30 12:14:09 crc kubenswrapper[4703]: I0130 12:14:09.153751 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d0ed19e-733e-4b63-923f-426ecea8ffe1-bundle" (OuterVolumeSpecName: "bundle") pod "0d0ed19e-733e-4b63-923f-426ecea8ffe1" (UID: "0d0ed19e-733e-4b63-923f-426ecea8ffe1"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:14:09 crc kubenswrapper[4703]: I0130 12:14:09.160359 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d0ed19e-733e-4b63-923f-426ecea8ffe1-kube-api-access-2qzzk" (OuterVolumeSpecName: "kube-api-access-2qzzk") pod "0d0ed19e-733e-4b63-923f-426ecea8ffe1" (UID: "0d0ed19e-733e-4b63-923f-426ecea8ffe1"). InnerVolumeSpecName "kube-api-access-2qzzk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:14:09 crc kubenswrapper[4703]: I0130 12:14:09.167006 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d0ed19e-733e-4b63-923f-426ecea8ffe1-util" (OuterVolumeSpecName: "util") pod "0d0ed19e-733e-4b63-923f-426ecea8ffe1" (UID: "0d0ed19e-733e-4b63-923f-426ecea8ffe1"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:14:09 crc kubenswrapper[4703]: I0130 12:14:09.255266 4703 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0d0ed19e-733e-4b63-923f-426ecea8ffe1-util\") on node \"crc\" DevicePath \"\"" Jan 30 12:14:09 crc kubenswrapper[4703]: I0130 12:14:09.255319 4703 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0d0ed19e-733e-4b63-923f-426ecea8ffe1-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:14:09 crc kubenswrapper[4703]: I0130 12:14:09.255333 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qzzk\" (UniqueName: \"kubernetes.io/projected/0d0ed19e-733e-4b63-923f-426ecea8ffe1-kube-api-access-2qzzk\") on node \"crc\" DevicePath \"\"" Jan 30 12:14:09 crc kubenswrapper[4703]: I0130 12:14:09.772859 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc" event={"ID":"0d0ed19e-733e-4b63-923f-426ecea8ffe1","Type":"ContainerDied","Data":"918d326065a2ccbfde91180b59b3f1c598f3223d4d3bf2eae9ef2ee46c110f5c"} Jan 30 12:14:09 crc kubenswrapper[4703]: I0130 12:14:09.772922 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="918d326065a2ccbfde91180b59b3f1c598f3223d4d3bf2eae9ef2ee46c110f5c" Jan 30 12:14:09 crc kubenswrapper[4703]: I0130 12:14:09.772922 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc" Jan 30 12:14:12 crc kubenswrapper[4703]: I0130 12:14:12.823576 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:14:12 crc kubenswrapper[4703]: I0130 12:14:12.823928 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:14:12 crc kubenswrapper[4703]: I0130 12:14:12.823986 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 12:14:12 crc kubenswrapper[4703]: I0130 12:14:12.824896 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cc192085768faef3bd05075caea9f2c24722e52bca08578d68a4a914317757f0"} pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 12:14:12 crc kubenswrapper[4703]: I0130 12:14:12.824966 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" containerID="cri-o://cc192085768faef3bd05075caea9f2c24722e52bca08578d68a4a914317757f0" gracePeriod=600 Jan 30 12:14:13 crc kubenswrapper[4703]: I0130 12:14:13.807849 4703 generic.go:334] "Generic (PLEG): container finished" 
podID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerID="cc192085768faef3bd05075caea9f2c24722e52bca08578d68a4a914317757f0" exitCode=0 Jan 30 12:14:13 crc kubenswrapper[4703]: I0130 12:14:13.807898 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerDied","Data":"cc192085768faef3bd05075caea9f2c24722e52bca08578d68a4a914317757f0"} Jan 30 12:14:13 crc kubenswrapper[4703]: I0130 12:14:13.808425 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerStarted","Data":"cc0f01995e25e263737b7a37c11076b9211642f05e9b4b225e1ac40c3094db02"} Jan 30 12:14:13 crc kubenswrapper[4703]: I0130 12:14:13.808455 4703 scope.go:117] "RemoveContainer" containerID="55a18e70c04b9c0432452c8dbe489a57bb034e3a138ce8caf3e700f751921742" Jan 30 12:14:17 crc kubenswrapper[4703]: I0130 12:14:17.167986 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-c759d5c99-j4njr"] Jan 30 12:14:17 crc kubenswrapper[4703]: E0130 12:14:17.168991 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d0ed19e-733e-4b63-923f-426ecea8ffe1" containerName="util" Jan 30 12:14:17 crc kubenswrapper[4703]: I0130 12:14:17.169013 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d0ed19e-733e-4b63-923f-426ecea8ffe1" containerName="util" Jan 30 12:14:17 crc kubenswrapper[4703]: E0130 12:14:17.169053 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d0ed19e-733e-4b63-923f-426ecea8ffe1" containerName="extract" Jan 30 12:14:17 crc kubenswrapper[4703]: I0130 12:14:17.169061 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d0ed19e-733e-4b63-923f-426ecea8ffe1" containerName="extract" Jan 30 12:14:17 crc kubenswrapper[4703]: E0130 12:14:17.169078 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d0ed19e-733e-4b63-923f-426ecea8ffe1" containerName="pull" Jan 30 12:14:17 crc kubenswrapper[4703]: I0130 12:14:17.169088 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d0ed19e-733e-4b63-923f-426ecea8ffe1" containerName="pull" Jan 30 12:14:17 crc kubenswrapper[4703]: I0130 12:14:17.169258 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d0ed19e-733e-4b63-923f-426ecea8ffe1" containerName="extract" Jan 30 12:14:17 crc kubenswrapper[4703]: I0130 12:14:17.169949 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-c759d5c99-j4njr" Jan 30 12:14:17 crc kubenswrapper[4703]: I0130 12:14:17.175097 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-8xdt7" Jan 30 12:14:17 crc kubenswrapper[4703]: I0130 12:14:17.202814 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-c759d5c99-j4njr"] Jan 30 12:14:17 crc kubenswrapper[4703]: I0130 12:14:17.294167 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dt9j5\" (UniqueName: \"kubernetes.io/projected/83a386a8-7fbe-48ca-b837-731a4fd64c57-kube-api-access-dt9j5\") pod \"openstack-operator-controller-init-c759d5c99-j4njr\" (UID: \"83a386a8-7fbe-48ca-b837-731a4fd64c57\") " pod="openstack-operators/openstack-operator-controller-init-c759d5c99-j4njr" Jan 30 12:14:17 crc kubenswrapper[4703]: I0130 12:14:17.395578 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dt9j5\" (UniqueName: \"kubernetes.io/projected/83a386a8-7fbe-48ca-b837-731a4fd64c57-kube-api-access-dt9j5\") pod \"openstack-operator-controller-init-c759d5c99-j4njr\" (UID: \"83a386a8-7fbe-48ca-b837-731a4fd64c57\") " pod="openstack-operators/openstack-operator-controller-init-c759d5c99-j4njr" Jan 30 12:14:17 crc kubenswrapper[4703]: I0130 12:14:17.419811 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dt9j5\" (UniqueName: \"kubernetes.io/projected/83a386a8-7fbe-48ca-b837-731a4fd64c57-kube-api-access-dt9j5\") pod \"openstack-operator-controller-init-c759d5c99-j4njr\" (UID: \"83a386a8-7fbe-48ca-b837-731a4fd64c57\") " pod="openstack-operators/openstack-operator-controller-init-c759d5c99-j4njr" Jan 30 12:14:17 crc kubenswrapper[4703]: I0130 12:14:17.533787 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-c759d5c99-j4njr" Jan 30 12:14:18 crc kubenswrapper[4703]: I0130 12:14:18.030100 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-c759d5c99-j4njr"] Jan 30 12:14:18 crc kubenswrapper[4703]: I0130 12:14:18.873547 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-c759d5c99-j4njr" event={"ID":"83a386a8-7fbe-48ca-b837-731a4fd64c57","Type":"ContainerStarted","Data":"3114cd06eef9b879ec4df23c1c539fd86856b10420bbf3b410e8a197556559ec"} Jan 30 12:14:23 crc kubenswrapper[4703]: I0130 12:14:23.921775 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-c759d5c99-j4njr" event={"ID":"83a386a8-7fbe-48ca-b837-731a4fd64c57","Type":"ContainerStarted","Data":"2a71d8bf1c65fffe410df513c9514f244007ebee46b1d0d7f7cf8593fdc3ba47"} Jan 30 12:14:23 crc kubenswrapper[4703]: I0130 12:14:23.922750 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-c759d5c99-j4njr" Jan 30 12:14:23 crc kubenswrapper[4703]: I0130 12:14:23.958546 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-c759d5c99-j4njr" podStartSLOduration=2.1243628 podStartE2EDuration="6.95851882s" podCreationTimestamp="2026-01-30 12:14:17 +0000 UTC" firstStartedPulling="2026-01-30 12:14:18.038602251 +0000 UTC m=+1093.816423905" lastFinishedPulling="2026-01-30 12:14:22.872758271 +0000 UTC m=+1098.650579925" observedRunningTime="2026-01-30 12:14:23.956344182 +0000 UTC m=+1099.734165856" watchObservedRunningTime="2026-01-30 12:14:23.95851882 +0000 UTC m=+1099.736340474" Jan 30 12:14:37 crc kubenswrapper[4703]: I0130 12:14:37.536218 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-c759d5c99-j4njr" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.360289 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-2l6qz"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.362424 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-2l6qz" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.369493 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-8d874c8fc-pdt6v"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.369652 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-s24vz" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.370505 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pdt6v" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.382662 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-2l6qz"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.386962 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-d867l" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.405842 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-8d874c8fc-pdt6v"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.438655 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-8886f4c47-vb4kb"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.440094 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-vb4kb" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.455336 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-d57jw" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.457181 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d9697b7f4-wxxzb"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.492228 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-wxxzb" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.530318 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8vd4\" (UniqueName: \"kubernetes.io/projected/7434f3b6-d77f-48b7-8ceb-f084a9c283f3-kube-api-access-t8vd4\") pod \"barbican-operator-controller-manager-7b6c4d8c5f-2l6qz\" (UID: \"7434f3b6-d77f-48b7-8ceb-f084a9c283f3\") " pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-2l6qz" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.530470 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrp9j\" (UniqueName: \"kubernetes.io/projected/a634d819-9927-4b82-8ec1-959ca5f19908-kube-api-access-xrp9j\") pod \"glance-operator-controller-manager-8886f4c47-vb4kb\" (UID: \"a634d819-9927-4b82-8ec1-959ca5f19908\") " pod="openstack-operators/glance-operator-controller-manager-8886f4c47-vb4kb" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.530506 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kbp97\" (UniqueName: \"kubernetes.io/projected/6219460f-6432-4743-984d-e3c0ce8d4538-kube-api-access-kbp97\") pod \"cinder-operator-controller-manager-8d874c8fc-pdt6v\" (UID: \"6219460f-6432-4743-984d-e3c0ce8d4538\") " pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pdt6v" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.531177 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-vsmkg" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.535330 4703 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack-operators/heat-operator-controller-manager-69d6db494d-fzxq8"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.536513 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-fzxq8" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.539931 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-m2lr5" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.546966 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-z2td9"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.548723 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79955696d6-z2td9" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.551445 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-q26rs"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.556744 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-jz68n" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.562716 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-q26rs" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.564280 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d9697b7f4-wxxzb"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.571553 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-69d6db494d-fzxq8"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.575028 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.578357 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-gjf4v" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.598073 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-z2td9"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.617415 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-q26rs"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.625500 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-8886f4c47-vb4kb"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.632470 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vrgt\" (UniqueName: \"kubernetes.io/projected/61661318-fcab-41f8-a672-7fe2b6cfa1ce-kube-api-access-4vrgt\") pod \"horizon-operator-controller-manager-5fb775575f-q26rs\" (UID: \"61661318-fcab-41f8-a672-7fe2b6cfa1ce\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-q26rs" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.632870 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-mw9sm\" (UniqueName: \"kubernetes.io/projected/918fbb0a-6011-4785-8b99-c69ef91af7ef-kube-api-access-mw9sm\") pod \"designate-operator-controller-manager-6d9697b7f4-wxxzb\" (UID: \"918fbb0a-6011-4785-8b99-c69ef91af7ef\") " pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-wxxzb" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.632998 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wh6qz\" (UniqueName: \"kubernetes.io/projected/2f2ed0c3-f32f-4402-9e49-3ef2c200c73a-kube-api-access-wh6qz\") pod \"heat-operator-controller-manager-69d6db494d-fzxq8\" (UID: \"2f2ed0c3-f32f-4402-9e49-3ef2c200c73a\") " pod="openstack-operators/heat-operator-controller-manager-69d6db494d-fzxq8" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.633139 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrp9j\" (UniqueName: \"kubernetes.io/projected/a634d819-9927-4b82-8ec1-959ca5f19908-kube-api-access-xrp9j\") pod \"glance-operator-controller-manager-8886f4c47-vb4kb\" (UID: \"a634d819-9927-4b82-8ec1-959ca5f19908\") " pod="openstack-operators/glance-operator-controller-manager-8886f4c47-vb4kb" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.633239 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert\") pod \"infra-operator-controller-manager-79955696d6-z2td9\" (UID: \"fd6c3383-802c-4e61-9dba-3f691d8d0fbc\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-z2td9" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.633355 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kbp97\" (UniqueName: \"kubernetes.io/projected/6219460f-6432-4743-984d-e3c0ce8d4538-kube-api-access-kbp97\") pod \"cinder-operator-controller-manager-8d874c8fc-pdt6v\" (UID: \"6219460f-6432-4743-984d-e3c0ce8d4538\") " pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pdt6v" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.633450 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdk8g\" (UniqueName: \"kubernetes.io/projected/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-kube-api-access-mdk8g\") pod \"infra-operator-controller-manager-79955696d6-z2td9\" (UID: \"fd6c3383-802c-4e61-9dba-3f691d8d0fbc\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-z2td9" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.633542 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8vd4\" (UniqueName: \"kubernetes.io/projected/7434f3b6-d77f-48b7-8ceb-f084a9c283f3-kube-api-access-t8vd4\") pod \"barbican-operator-controller-manager-7b6c4d8c5f-2l6qz\" (UID: \"7434f3b6-d77f-48b7-8ceb-f084a9c283f3\") " pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-2l6qz" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.674673 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-p245s"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.676060 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-p245s" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.686535 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-j8ff4" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.690665 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8vd4\" (UniqueName: \"kubernetes.io/projected/7434f3b6-d77f-48b7-8ceb-f084a9c283f3-kube-api-access-t8vd4\") pod \"barbican-operator-controller-manager-7b6c4d8c5f-2l6qz\" (UID: \"7434f3b6-d77f-48b7-8ceb-f084a9c283f3\") " pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-2l6qz" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.691091 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-2l6qz" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.701942 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kbp97\" (UniqueName: \"kubernetes.io/projected/6219460f-6432-4743-984d-e3c0ce8d4538-kube-api-access-kbp97\") pod \"cinder-operator-controller-manager-8d874c8fc-pdt6v\" (UID: \"6219460f-6432-4743-984d-e3c0ce8d4538\") " pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pdt6v" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.702435 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pdt6v" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.704542 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-7dd968899f-sbcr8"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.705667 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-sbcr8" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.706078 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrp9j\" (UniqueName: \"kubernetes.io/projected/a634d819-9927-4b82-8ec1-959ca5f19908-kube-api-access-xrp9j\") pod \"glance-operator-controller-manager-8886f4c47-vb4kb\" (UID: \"a634d819-9927-4b82-8ec1-959ca5f19908\") " pod="openstack-operators/glance-operator-controller-manager-8886f4c47-vb4kb" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.709058 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-qgt9h" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.735102 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vrgt\" (UniqueName: \"kubernetes.io/projected/61661318-fcab-41f8-a672-7fe2b6cfa1ce-kube-api-access-4vrgt\") pod \"horizon-operator-controller-manager-5fb775575f-q26rs\" (UID: \"61661318-fcab-41f8-a672-7fe2b6cfa1ce\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-q26rs" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.736008 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mw9sm\" (UniqueName: \"kubernetes.io/projected/918fbb0a-6011-4785-8b99-c69ef91af7ef-kube-api-access-mw9sm\") pod \"designate-operator-controller-manager-6d9697b7f4-wxxzb\" (UID: \"918fbb0a-6011-4785-8b99-c69ef91af7ef\") " pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-wxxzb" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.736167 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wh6qz\" (UniqueName: \"kubernetes.io/projected/2f2ed0c3-f32f-4402-9e49-3ef2c200c73a-kube-api-access-wh6qz\") pod \"heat-operator-controller-manager-69d6db494d-fzxq8\" (UID: \"2f2ed0c3-f32f-4402-9e49-3ef2c200c73a\") " pod="openstack-operators/heat-operator-controller-manager-69d6db494d-fzxq8" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.736303 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert\") pod \"infra-operator-controller-manager-79955696d6-z2td9\" (UID: \"fd6c3383-802c-4e61-9dba-3f691d8d0fbc\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-z2td9" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.736402 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qw4dn\" (UniqueName: \"kubernetes.io/projected/0e5cd4fd-45a1-4cdb-9317-35abf01f5c33-kube-api-access-qw4dn\") pod \"ironic-operator-controller-manager-5f4b8bd54d-p245s\" (UID: \"0e5cd4fd-45a1-4cdb-9317-35abf01f5c33\") " pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-p245s" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.736538 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdk8g\" (UniqueName: \"kubernetes.io/projected/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-kube-api-access-mdk8g\") pod \"infra-operator-controller-manager-79955696d6-z2td9\" (UID: \"fd6c3383-802c-4e61-9dba-3f691d8d0fbc\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-z2td9" Jan 30 12:14:57 crc kubenswrapper[4703]: E0130 
12:14:57.737228 4703 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 30 12:14:57 crc kubenswrapper[4703]: E0130 12:14:57.737399 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert podName:fd6c3383-802c-4e61-9dba-3f691d8d0fbc nodeName:}" failed. No retries permitted until 2026-01-30 12:14:58.237374793 +0000 UTC m=+1134.015196457 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert") pod "infra-operator-controller-manager-79955696d6-z2td9" (UID: "fd6c3383-802c-4e61-9dba-3f691d8d0fbc") : secret "infra-operator-webhook-server-cert" not found Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.742452 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-84f48565d4-xv5lq"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.743598 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-xv5lq" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.758315 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-zzdzn"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.759596 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zzdzn" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.764163 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-dhgrm" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.765381 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-46wgs" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.806566 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-vb4kb" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.835322 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vrgt\" (UniqueName: \"kubernetes.io/projected/61661318-fcab-41f8-a672-7fe2b6cfa1ce-kube-api-access-4vrgt\") pod \"horizon-operator-controller-manager-5fb775575f-q26rs\" (UID: \"61661318-fcab-41f8-a672-7fe2b6cfa1ce\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-q26rs" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.838283 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9c7b9\" (UniqueName: \"kubernetes.io/projected/bb4211ca-5463-47ec-851d-86f23ff74397-kube-api-access-9c7b9\") pod \"mariadb-operator-controller-manager-67bf948998-zzdzn\" (UID: \"bb4211ca-5463-47ec-851d-86f23ff74397\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zzdzn" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.838435 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48f5h\" (UniqueName: \"kubernetes.io/projected/d09ba286-529c-4d2e-b56a-7c8efaff7fec-kube-api-access-48f5h\") pod \"manila-operator-controller-manager-7dd968899f-sbcr8\" (UID: \"d09ba286-529c-4d2e-b56a-7c8efaff7fec\") " pod="openstack-operators/manila-operator-controller-manager-7dd968899f-sbcr8" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.838568 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qw4dn\" (UniqueName: \"kubernetes.io/projected/0e5cd4fd-45a1-4cdb-9317-35abf01f5c33-kube-api-access-qw4dn\") pod \"ironic-operator-controller-manager-5f4b8bd54d-p245s\" (UID: \"0e5cd4fd-45a1-4cdb-9317-35abf01f5c33\") " pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-p245s" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.838651 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nq8mh\" (UniqueName: \"kubernetes.io/projected/9cc112c6-c3ca-4d9c-ab24-178578e1a41f-kube-api-access-nq8mh\") pod \"keystone-operator-controller-manager-84f48565d4-xv5lq\" (UID: \"9cc112c6-c3ca-4d9c-ab24-178578e1a41f\") " pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-xv5lq" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.845511 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wh6qz\" (UniqueName: \"kubernetes.io/projected/2f2ed0c3-f32f-4402-9e49-3ef2c200c73a-kube-api-access-wh6qz\") pod \"heat-operator-controller-manager-69d6db494d-fzxq8\" (UID: \"2f2ed0c3-f32f-4402-9e49-3ef2c200c73a\") " pod="openstack-operators/heat-operator-controller-manager-69d6db494d-fzxq8" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.846177 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mw9sm\" (UniqueName: \"kubernetes.io/projected/918fbb0a-6011-4785-8b99-c69ef91af7ef-kube-api-access-mw9sm\") pod \"designate-operator-controller-manager-6d9697b7f4-wxxzb\" (UID: \"918fbb0a-6011-4785-8b99-c69ef91af7ef\") " pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-wxxzb" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.850478 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-wxxzb" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.855851 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdk8g\" (UniqueName: \"kubernetes.io/projected/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-kube-api-access-mdk8g\") pod \"infra-operator-controller-manager-79955696d6-z2td9\" (UID: \"fd6c3383-802c-4e61-9dba-3f691d8d0fbc\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-z2td9" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.879916 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-fzxq8" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.894677 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-84f48565d4-xv5lq"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.900458 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-zzdzn"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.912227 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-p245s"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.919407 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7dd968899f-sbcr8"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.919931 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-q26rs" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.929184 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-585dbc889-w9kvh"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.931417 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-w9kvh" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.950814 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-478ct" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.951032 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6687f8d877-nxm9c"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.954145 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9c7b9\" (UniqueName: \"kubernetes.io/projected/bb4211ca-5463-47ec-851d-86f23ff74397-kube-api-access-9c7b9\") pod \"mariadb-operator-controller-manager-67bf948998-zzdzn\" (UID: \"bb4211ca-5463-47ec-851d-86f23ff74397\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zzdzn" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.954261 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48f5h\" (UniqueName: \"kubernetes.io/projected/d09ba286-529c-4d2e-b56a-7c8efaff7fec-kube-api-access-48f5h\") pod \"manila-operator-controller-manager-7dd968899f-sbcr8\" (UID: \"d09ba286-529c-4d2e-b56a-7c8efaff7fec\") " pod="openstack-operators/manila-operator-controller-manager-7dd968899f-sbcr8" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.954332 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nq8mh\" (UniqueName: \"kubernetes.io/projected/9cc112c6-c3ca-4d9c-ab24-178578e1a41f-kube-api-access-nq8mh\") pod \"keystone-operator-controller-manager-84f48565d4-xv5lq\" (UID: \"9cc112c6-c3ca-4d9c-ab24-178578e1a41f\") " pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-xv5lq" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.958264 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-nxm9c" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.962289 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qw4dn\" (UniqueName: \"kubernetes.io/projected/0e5cd4fd-45a1-4cdb-9317-35abf01f5c33-kube-api-access-qw4dn\") pod \"ironic-operator-controller-manager-5f4b8bd54d-p245s\" (UID: \"0e5cd4fd-45a1-4cdb-9317-35abf01f5c33\") " pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-p245s" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.966665 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-55bff696bd-x7cwt"] Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.968310 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-x7cwt" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.985935 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-4mxbz" Jan 30 12:14:57 crc kubenswrapper[4703]: I0130 12:14:57.986314 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-29t5j" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.012833 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-585dbc889-w9kvh"] Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.029950 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48f5h\" (UniqueName: \"kubernetes.io/projected/d09ba286-529c-4d2e-b56a-7c8efaff7fec-kube-api-access-48f5h\") pod \"manila-operator-controller-manager-7dd968899f-sbcr8\" (UID: \"d09ba286-529c-4d2e-b56a-7c8efaff7fec\") " pod="openstack-operators/manila-operator-controller-manager-7dd968899f-sbcr8" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.061389 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phqzr\" (UniqueName: \"kubernetes.io/projected/a530abd9-3d62-4c65-b9e8-190208cbefd4-kube-api-access-phqzr\") pod \"neutron-operator-controller-manager-585dbc889-w9kvh\" (UID: \"a530abd9-3d62-4c65-b9e8-190208cbefd4\") " pod="openstack-operators/neutron-operator-controller-manager-585dbc889-w9kvh" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.061468 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzk7z\" (UniqueName: \"kubernetes.io/projected/e0183650-4de1-4a80-a310-3313864fae9b-kube-api-access-mzk7z\") pod \"nova-operator-controller-manager-55bff696bd-x7cwt\" (UID: \"e0183650-4de1-4a80-a310-3313864fae9b\") " pod="openstack-operators/nova-operator-controller-manager-55bff696bd-x7cwt" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.061488 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sl2js\" (UniqueName: \"kubernetes.io/projected/31df4dd4-aab4-4e73-9229-c78a40e82eda-kube-api-access-sl2js\") pod \"octavia-operator-controller-manager-6687f8d877-nxm9c\" (UID: \"31df4dd4-aab4-4e73-9229-c78a40e82eda\") " pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-nxm9c" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.075390 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9c7b9\" (UniqueName: \"kubernetes.io/projected/bb4211ca-5463-47ec-851d-86f23ff74397-kube-api-access-9c7b9\") pod \"mariadb-operator-controller-manager-67bf948998-zzdzn\" (UID: \"bb4211ca-5463-47ec-851d-86f23ff74397\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zzdzn" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.091956 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nq8mh\" (UniqueName: \"kubernetes.io/projected/9cc112c6-c3ca-4d9c-ab24-178578e1a41f-kube-api-access-nq8mh\") pod \"keystone-operator-controller-manager-84f48565d4-xv5lq\" (UID: \"9cc112c6-c3ca-4d9c-ab24-178578e1a41f\") " pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-xv5lq" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 
12:14:58.092049 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-55bff696bd-x7cwt"] Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.133278 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6687f8d877-nxm9c"] Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.144433 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2"] Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.152408 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.168194 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phqzr\" (UniqueName: \"kubernetes.io/projected/a530abd9-3d62-4c65-b9e8-190208cbefd4-kube-api-access-phqzr\") pod \"neutron-operator-controller-manager-585dbc889-w9kvh\" (UID: \"a530abd9-3d62-4c65-b9e8-190208cbefd4\") " pod="openstack-operators/neutron-operator-controller-manager-585dbc889-w9kvh" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.168265 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzk7z\" (UniqueName: \"kubernetes.io/projected/e0183650-4de1-4a80-a310-3313864fae9b-kube-api-access-mzk7z\") pod \"nova-operator-controller-manager-55bff696bd-x7cwt\" (UID: \"e0183650-4de1-4a80-a310-3313864fae9b\") " pod="openstack-operators/nova-operator-controller-manager-55bff696bd-x7cwt" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.168311 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sl2js\" (UniqueName: \"kubernetes.io/projected/31df4dd4-aab4-4e73-9229-c78a40e82eda-kube-api-access-sl2js\") pod \"octavia-operator-controller-manager-6687f8d877-nxm9c\" (UID: \"31df4dd4-aab4-4e73-9229-c78a40e82eda\") " pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-nxm9c" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.169339 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.170241 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-bzdlb" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.282853 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-p245s" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.293758 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2"] Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.307816 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-xv5lq" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.326217 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-sbcr8" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.327524 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-zqh8w"] Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.328583 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phqzr\" (UniqueName: \"kubernetes.io/projected/a530abd9-3d62-4c65-b9e8-190208cbefd4-kube-api-access-phqzr\") pod \"neutron-operator-controller-manager-585dbc889-w9kvh\" (UID: \"a530abd9-3d62-4c65-b9e8-190208cbefd4\") " pod="openstack-operators/neutron-operator-controller-manager-585dbc889-w9kvh" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.329701 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zzdzn" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.330774 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzk7z\" (UniqueName: \"kubernetes.io/projected/e0183650-4de1-4a80-a310-3313864fae9b-kube-api-access-mzk7z\") pod \"nova-operator-controller-manager-55bff696bd-x7cwt\" (UID: \"e0183650-4de1-4a80-a310-3313864fae9b\") " pod="openstack-operators/nova-operator-controller-manager-55bff696bd-x7cwt" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.330964 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzk7z\" (UniqueName: \"kubernetes.io/projected/e0183650-4de1-4a80-a310-3313864fae9b-kube-api-access-mzk7z\") pod \"nova-operator-controller-manager-55bff696bd-x7cwt\" (UID: \"e0183650-4de1-4a80-a310-3313864fae9b\") " pod="openstack-operators/nova-operator-controller-manager-55bff696bd-x7cwt" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.331092 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2\" (UID: \"98241c60-d78a-4a93-bfb8-a061e65c7c83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.331138 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2cgl\" (UniqueName: \"kubernetes.io/projected/98241c60-d78a-4a93-bfb8-a061e65c7c83-kube-api-access-n2cgl\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2\" (UID: \"98241c60-d78a-4a93-bfb8-a061e65c7c83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.331229 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert\") pod \"infra-operator-controller-manager-79955696d6-z2td9\" (UID: \"fd6c3383-802c-4e61-9dba-3f691d8d0fbc\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-z2td9" Jan 30 12:14:58 crc kubenswrapper[4703]: E0130 12:14:58.331494 4703 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 30 12:14:58 crc kubenswrapper[4703]: E0130 12:14:58.342730 4703 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert podName:fd6c3383-802c-4e61-9dba-3f691d8d0fbc nodeName:}" failed. No retries permitted until 2026-01-30 12:14:59.342651073 +0000 UTC m=+1135.120472727 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert") pod "infra-operator-controller-manager-79955696d6-z2td9" (UID: "fd6c3383-802c-4e61-9dba-3f691d8d0fbc") : secret "infra-operator-webhook-server-cert" not found Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.351237 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sl2js\" (UniqueName: \"kubernetes.io/projected/31df4dd4-aab4-4e73-9229-c78a40e82eda-kube-api-access-sl2js\") pod \"octavia-operator-controller-manager-6687f8d877-nxm9c\" (UID: \"31df4dd4-aab4-4e73-9229-c78a40e82eda\") " pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-nxm9c" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.353468 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzk7z\" (UniqueName: \"kubernetes.io/projected/e0183650-4de1-4a80-a310-3313864fae9b-kube-api-access-mzk7z\") pod \"nova-operator-controller-manager-55bff696bd-x7cwt\" (UID: \"e0183650-4de1-4a80-a310-3313864fae9b\") " pod="openstack-operators/nova-operator-controller-manager-55bff696bd-x7cwt" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.362205 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zqh8w" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.380616 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-lbkhm" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.433512 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2\" (UID: \"98241c60-d78a-4a93-bfb8-a061e65c7c83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.433566 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2cgl\" (UniqueName: \"kubernetes.io/projected/98241c60-d78a-4a93-bfb8-a061e65c7c83-kube-api-access-n2cgl\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2\" (UID: \"98241c60-d78a-4a93-bfb8-a061e65c7c83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.433688 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slwrk\" (UniqueName: \"kubernetes.io/projected/baed05bb-6058-489e-b1ad-424333b94494-kube-api-access-slwrk\") pod \"ovn-operator-controller-manager-788c46999f-zqh8w\" (UID: \"baed05bb-6058-489e-b1ad-424333b94494\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zqh8w" Jan 30 12:14:58 crc kubenswrapper[4703]: E0130 12:14:58.433928 4703 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 30 
12:14:58 crc kubenswrapper[4703]: E0130 12:14:58.433986 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert podName:98241c60-d78a-4a93-bfb8-a061e65c7c83 nodeName:}" failed. No retries permitted until 2026-01-30 12:14:58.933962744 +0000 UTC m=+1134.711784398 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" (UID: "98241c60-d78a-4a93-bfb8-a061e65c7c83") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.436245 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-nxm9c" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.479883 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-x7cwt" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.485195 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-qzmpv"] Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.486781 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-qzmpv" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.533618 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-jnhp7" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.535309 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9s5g\" (UniqueName: \"kubernetes.io/projected/2d50762e-d50c-4448-bb9c-19136516a622-kube-api-access-k9s5g\") pod \"placement-operator-controller-manager-5b964cf4cd-qzmpv\" (UID: \"2d50762e-d50c-4448-bb9c-19136516a622\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-qzmpv" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.535491 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slwrk\" (UniqueName: \"kubernetes.io/projected/baed05bb-6058-489e-b1ad-424333b94494-kube-api-access-slwrk\") pod \"ovn-operator-controller-manager-788c46999f-zqh8w\" (UID: \"baed05bb-6058-489e-b1ad-424333b94494\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zqh8w" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.544403 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2cgl\" (UniqueName: \"kubernetes.io/projected/98241c60-d78a-4a93-bfb8-a061e65c7c83-kube-api-access-n2cgl\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2\" (UID: \"98241c60-d78a-4a93-bfb8-a061e65c7c83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.621986 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-w9kvh" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.640146 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9s5g\" (UniqueName: \"kubernetes.io/projected/2d50762e-d50c-4448-bb9c-19136516a622-kube-api-access-k9s5g\") pod \"placement-operator-controller-manager-5b964cf4cd-qzmpv\" (UID: \"2d50762e-d50c-4448-bb9c-19136516a622\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-qzmpv" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.655790 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-qzmpv"] Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.676499 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-zqh8w"] Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.709951 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slwrk\" (UniqueName: \"kubernetes.io/projected/baed05bb-6058-489e-b1ad-424333b94494-kube-api-access-slwrk\") pod \"ovn-operator-controller-manager-788c46999f-zqh8w\" (UID: \"baed05bb-6058-489e-b1ad-424333b94494\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zqh8w" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.716864 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9s5g\" (UniqueName: \"kubernetes.io/projected/2d50762e-d50c-4448-bb9c-19136516a622-kube-api-access-k9s5g\") pod \"placement-operator-controller-manager-5b964cf4cd-qzmpv\" (UID: \"2d50762e-d50c-4448-bb9c-19136516a622\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-qzmpv" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.719765 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-68fc8c869-hw7pr"] Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.721374 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-hw7pr" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.731620 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-64b5b76f97-cwdkz"] Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.743680 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zqh8w" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.744616 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-cwdkz" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.745408 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rpvdl\" (UniqueName: \"kubernetes.io/projected/f1787cd3-2c84-43bc-87dd-5356e44ba9cd-kube-api-access-rpvdl\") pod \"swift-operator-controller-manager-68fc8c869-hw7pr\" (UID: \"f1787cd3-2c84-43bc-87dd-5356e44ba9cd\") " pod="openstack-operators/swift-operator-controller-manager-68fc8c869-hw7pr" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.746095 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-tkwgz" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.760224 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-5n9ph" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.800052 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-68fc8c869-hw7pr"] Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.845744 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-x2z8q"] Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.855951 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-x2z8q" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.857721 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gkws\" (UniqueName: \"kubernetes.io/projected/425e555c-7283-4169-9293-e380104d38ca-kube-api-access-6gkws\") pod \"telemetry-operator-controller-manager-64b5b76f97-cwdkz\" (UID: \"425e555c-7283-4169-9293-e380104d38ca\") " pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-cwdkz" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.859160 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rpvdl\" (UniqueName: \"kubernetes.io/projected/f1787cd3-2c84-43bc-87dd-5356e44ba9cd-kube-api-access-rpvdl\") pod \"swift-operator-controller-manager-68fc8c869-hw7pr\" (UID: \"f1787cd3-2c84-43bc-87dd-5356e44ba9cd\") " pod="openstack-operators/swift-operator-controller-manager-68fc8c869-hw7pr" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.879352 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-4x2g8" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.894518 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-64b5b76f97-cwdkz"] Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.898544 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rpvdl\" (UniqueName: \"kubernetes.io/projected/f1787cd3-2c84-43bc-87dd-5356e44ba9cd-kube-api-access-rpvdl\") pod \"swift-operator-controller-manager-68fc8c869-hw7pr\" (UID: \"f1787cd3-2c84-43bc-87dd-5356e44ba9cd\") " pod="openstack-operators/swift-operator-controller-manager-68fc8c869-hw7pr" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.944478 4703 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack-operators/watcher-operator-controller-manager-79c984db64-qk88l"] Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.945733 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-79c984db64-qk88l" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.950582 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-525xz" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.971820 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gkws\" (UniqueName: \"kubernetes.io/projected/425e555c-7283-4169-9293-e380104d38ca-kube-api-access-6gkws\") pod \"telemetry-operator-controller-manager-64b5b76f97-cwdkz\" (UID: \"425e555c-7283-4169-9293-e380104d38ca\") " pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-cwdkz" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.972246 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2\" (UID: \"98241c60-d78a-4a93-bfb8-a061e65c7c83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.972362 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlcdz\" (UniqueName: \"kubernetes.io/projected/842c0b1e-43ca-49b3-bcc0-a5f9714773ac-kube-api-access-hlcdz\") pod \"test-operator-controller-manager-56f8bfcd9f-x2z8q\" (UID: \"842c0b1e-43ca-49b3-bcc0-a5f9714773ac\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-x2z8q" Jan 30 12:14:58 crc kubenswrapper[4703]: E0130 12:14:58.973023 4703 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 30 12:14:58 crc kubenswrapper[4703]: E0130 12:14:58.973619 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert podName:98241c60-d78a-4a93-bfb8-a061e65c7c83 nodeName:}" failed. No retries permitted until 2026-01-30 12:14:59.973592936 +0000 UTC m=+1135.751414590 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" (UID: "98241c60-d78a-4a93-bfb8-a061e65c7c83") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.976910 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-x2z8q"] Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.992252 4703 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 30 12:14:58 crc kubenswrapper[4703]: I0130 12:14:58.992918 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-qzmpv" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.046501 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gkws\" (UniqueName: \"kubernetes.io/projected/425e555c-7283-4169-9293-e380104d38ca-kube-api-access-6gkws\") pod \"telemetry-operator-controller-manager-64b5b76f97-cwdkz\" (UID: \"425e555c-7283-4169-9293-e380104d38ca\") " pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-cwdkz" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.046601 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-79c984db64-qk88l"] Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.075172 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlcdz\" (UniqueName: \"kubernetes.io/projected/842c0b1e-43ca-49b3-bcc0-a5f9714773ac-kube-api-access-hlcdz\") pod \"test-operator-controller-manager-56f8bfcd9f-x2z8q\" (UID: \"842c0b1e-43ca-49b3-bcc0-a5f9714773ac\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-x2z8q" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.075399 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tg4jh\" (UniqueName: \"kubernetes.io/projected/7f8888ec-800b-41b1-a297-576a28957668-kube-api-access-tg4jh\") pod \"watcher-operator-controller-manager-79c984db64-qk88l\" (UID: \"7f8888ec-800b-41b1-a297-576a28957668\") " pod="openstack-operators/watcher-operator-controller-manager-79c984db64-qk88l" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.080413 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-hw7pr" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.103575 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlcdz\" (UniqueName: \"kubernetes.io/projected/842c0b1e-43ca-49b3-bcc0-a5f9714773ac-kube-api-access-hlcdz\") pod \"test-operator-controller-manager-56f8bfcd9f-x2z8q\" (UID: \"842c0b1e-43ca-49b3-bcc0-a5f9714773ac\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-x2z8q" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.133245 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-cwdkz" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.152622 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-x2z8q" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.162539 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-8d874c8fc-pdt6v"] Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.189584 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tg4jh\" (UniqueName: \"kubernetes.io/projected/7f8888ec-800b-41b1-a297-576a28957668-kube-api-access-tg4jh\") pod \"watcher-operator-controller-manager-79c984db64-qk88l\" (UID: \"7f8888ec-800b-41b1-a297-576a28957668\") " pod="openstack-operators/watcher-operator-controller-manager-79c984db64-qk88l" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.274939 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tg4jh\" (UniqueName: \"kubernetes.io/projected/7f8888ec-800b-41b1-a297-576a28957668-kube-api-access-tg4jh\") pod \"watcher-operator-controller-manager-79c984db64-qk88l\" (UID: \"7f8888ec-800b-41b1-a297-576a28957668\") " pod="openstack-operators/watcher-operator-controller-manager-79c984db64-qk88l" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.286044 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79"] Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.294962 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.301632 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79"] Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.306304 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mfgtf"] Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.307745 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mfgtf" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.310620 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.310929 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-q6xxw" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.311082 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.311585 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-mglv9" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.319556 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mfgtf"] Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.404975 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.405093 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert\") pod \"infra-operator-controller-manager-79955696d6-z2td9\" (UID: \"fd6c3383-802c-4e61-9dba-3f691d8d0fbc\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-z2td9" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.405171 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.405266 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkkmz\" (UniqueName: \"kubernetes.io/projected/dfa2e6c9-10c2-48ee-8aed-315d5bf5b1c5-kube-api-access-xkkmz\") pod \"rabbitmq-cluster-operator-manager-668c99d594-mfgtf\" (UID: \"dfa2e6c9-10c2-48ee-8aed-315d5bf5b1c5\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mfgtf" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.405391 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tmf9\" (UniqueName: \"kubernetes.io/projected/9d4e1655-d610-4867-b435-4ec960bb483c-kube-api-access-6tmf9\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:14:59 crc kubenswrapper[4703]: E0130 12:14:59.405655 4703 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret 
"infra-operator-webhook-server-cert" not found Jan 30 12:14:59 crc kubenswrapper[4703]: E0130 12:14:59.405735 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert podName:fd6c3383-802c-4e61-9dba-3f691d8d0fbc nodeName:}" failed. No retries permitted until 2026-01-30 12:15:01.4057131 +0000 UTC m=+1137.183534754 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert") pod "infra-operator-controller-manager-79955696d6-z2td9" (UID: "fd6c3383-802c-4e61-9dba-3f691d8d0fbc") : secret "infra-operator-webhook-server-cert" not found Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.409493 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pdt6v" event={"ID":"6219460f-6432-4743-984d-e3c0ce8d4538","Type":"ContainerStarted","Data":"336289a623457b20b8cb79e076c4ff08ded9b20f144cd7a05fda8a467e92cc9b"} Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.487545 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-2l6qz"] Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.506707 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-79c984db64-qk88l" Jan 30 12:14:59 crc kubenswrapper[4703]: E0130 12:14:59.514798 4703 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 30 12:14:59 crc kubenswrapper[4703]: E0130 12:14:59.514905 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs podName:9d4e1655-d610-4867-b435-4ec960bb483c nodeName:}" failed. No retries permitted until 2026-01-30 12:15:00.014876672 +0000 UTC m=+1135.792698326 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs") pod "openstack-operator-controller-manager-6987d5d556-cff79" (UID: "9d4e1655-d610-4867-b435-4ec960bb483c") : secret "metrics-server-cert" not found Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.514617 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.515509 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:14:59 crc kubenswrapper[4703]: E0130 12:14:59.515612 4703 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 30 12:14:59 crc kubenswrapper[4703]: E0130 12:14:59.515659 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs podName:9d4e1655-d610-4867-b435-4ec960bb483c nodeName:}" failed. No retries permitted until 2026-01-30 12:15:00.015646623 +0000 UTC m=+1135.793468277 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs") pod "openstack-operator-controller-manager-6987d5d556-cff79" (UID: "9d4e1655-d610-4867-b435-4ec960bb483c") : secret "webhook-server-cert" not found Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.515751 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkkmz\" (UniqueName: \"kubernetes.io/projected/dfa2e6c9-10c2-48ee-8aed-315d5bf5b1c5-kube-api-access-xkkmz\") pod \"rabbitmq-cluster-operator-manager-668c99d594-mfgtf\" (UID: \"dfa2e6c9-10c2-48ee-8aed-315d5bf5b1c5\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mfgtf" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.515868 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tmf9\" (UniqueName: \"kubernetes.io/projected/9d4e1655-d610-4867-b435-4ec960bb483c-kube-api-access-6tmf9\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.547068 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkkmz\" (UniqueName: \"kubernetes.io/projected/dfa2e6c9-10c2-48ee-8aed-315d5bf5b1c5-kube-api-access-xkkmz\") pod \"rabbitmq-cluster-operator-manager-668c99d594-mfgtf\" (UID: \"dfa2e6c9-10c2-48ee-8aed-315d5bf5b1c5\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mfgtf" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.553870 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-6tmf9\" (UniqueName: \"kubernetes.io/projected/9d4e1655-d610-4867-b435-4ec960bb483c-kube-api-access-6tmf9\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:14:59 crc kubenswrapper[4703]: I0130 12:14:59.718364 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mfgtf" Jan 30 12:14:59 crc kubenswrapper[4703]: W0130 12:14:59.766252 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7434f3b6_d77f_48b7_8ceb_f084a9c283f3.slice/crio-79dbdbc2aad6ead9df3c8cf1c6dfd43710534183517f8af7fb8316adb61bb019 WatchSource:0}: Error finding container 79dbdbc2aad6ead9df3c8cf1c6dfd43710534183517f8af7fb8316adb61bb019: Status 404 returned error can't find the container with id 79dbdbc2aad6ead9df3c8cf1c6dfd43710534183517f8af7fb8316adb61bb019 Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.028711 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2\" (UID: \"98241c60-d78a-4a93-bfb8-a061e65c7c83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.028803 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.028866 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:15:00 crc kubenswrapper[4703]: E0130 12:15:00.029089 4703 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 30 12:15:00 crc kubenswrapper[4703]: E0130 12:15:00.029199 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs podName:9d4e1655-d610-4867-b435-4ec960bb483c nodeName:}" failed. No retries permitted until 2026-01-30 12:15:01.029171072 +0000 UTC m=+1136.806992726 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs") pod "openstack-operator-controller-manager-6987d5d556-cff79" (UID: "9d4e1655-d610-4867-b435-4ec960bb483c") : secret "webhook-server-cert" not found Jan 30 12:15:00 crc kubenswrapper[4703]: E0130 12:15:00.029707 4703 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 30 12:15:00 crc kubenswrapper[4703]: E0130 12:15:00.029739 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert podName:98241c60-d78a-4a93-bfb8-a061e65c7c83 nodeName:}" failed. No retries permitted until 2026-01-30 12:15:02.029730317 +0000 UTC m=+1137.807551971 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" (UID: "98241c60-d78a-4a93-bfb8-a061e65c7c83") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 30 12:15:00 crc kubenswrapper[4703]: E0130 12:15:00.029779 4703 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 30 12:15:00 crc kubenswrapper[4703]: E0130 12:15:00.029799 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs podName:9d4e1655-d610-4867-b435-4ec960bb483c nodeName:}" failed. No retries permitted until 2026-01-30 12:15:01.029792518 +0000 UTC m=+1136.807614172 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs") pod "openstack-operator-controller-manager-6987d5d556-cff79" (UID: "9d4e1655-d610-4867-b435-4ec960bb483c") : secret "metrics-server-cert" not found Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.244213 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42"] Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.254594 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42"] Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.254745 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42" Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.263518 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.267752 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.278276 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-6d9697b7f4-wxxzb"] Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.302176 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-8886f4c47-vb4kb"] Jan 30 12:15:00 crc kubenswrapper[4703]: W0130 12:15:00.303957 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod918fbb0a_6011_4785_8b99_c69ef91af7ef.slice/crio-2c362567a053341c8f124132043fcefc9054e97965549df80d89d99cf3079607 WatchSource:0}: Error finding container 2c362567a053341c8f124132043fcefc9054e97965549df80d89d99cf3079607: Status 404 returned error can't find the container with id 2c362567a053341c8f124132043fcefc9054e97965549df80d89d99cf3079607 Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.347587 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b063a5d7-e03b-46db-85b0-75adab6bca8f-secret-volume\") pod \"collect-profiles-29496255-cdn42\" (UID: \"b063a5d7-e03b-46db-85b0-75adab6bca8f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42" Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.358602 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkrj5\" (UniqueName: \"kubernetes.io/projected/b063a5d7-e03b-46db-85b0-75adab6bca8f-kube-api-access-zkrj5\") pod \"collect-profiles-29496255-cdn42\" (UID: \"b063a5d7-e03b-46db-85b0-75adab6bca8f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42" Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.358920 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b063a5d7-e03b-46db-85b0-75adab6bca8f-config-volume\") pod \"collect-profiles-29496255-cdn42\" (UID: \"b063a5d7-e03b-46db-85b0-75adab6bca8f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42" Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.348234 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-69d6db494d-fzxq8"] Jan 30 12:15:00 crc kubenswrapper[4703]: W0130 12:15:00.348465 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda634d819_9927_4b82_8ec1_959ca5f19908.slice/crio-747dec34ec3e75e52f955a2a79f626b294f5403034991f0ad37aca14135cab65 WatchSource:0}: Error finding container 747dec34ec3e75e52f955a2a79f626b294f5403034991f0ad37aca14135cab65: Status 404 returned error can't find the container with id 747dec34ec3e75e52f955a2a79f626b294f5403034991f0ad37aca14135cab65 Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 
12:15:00.456809 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-vb4kb" event={"ID":"a634d819-9927-4b82-8ec1-959ca5f19908","Type":"ContainerStarted","Data":"747dec34ec3e75e52f955a2a79f626b294f5403034991f0ad37aca14135cab65"} Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.462677 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b063a5d7-e03b-46db-85b0-75adab6bca8f-secret-volume\") pod \"collect-profiles-29496255-cdn42\" (UID: \"b063a5d7-e03b-46db-85b0-75adab6bca8f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42" Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.462810 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkrj5\" (UniqueName: \"kubernetes.io/projected/b063a5d7-e03b-46db-85b0-75adab6bca8f-kube-api-access-zkrj5\") pod \"collect-profiles-29496255-cdn42\" (UID: \"b063a5d7-e03b-46db-85b0-75adab6bca8f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42" Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.462964 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b063a5d7-e03b-46db-85b0-75adab6bca8f-config-volume\") pod \"collect-profiles-29496255-cdn42\" (UID: \"b063a5d7-e03b-46db-85b0-75adab6bca8f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42" Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.464012 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b063a5d7-e03b-46db-85b0-75adab6bca8f-config-volume\") pod \"collect-profiles-29496255-cdn42\" (UID: \"b063a5d7-e03b-46db-85b0-75adab6bca8f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42" Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.471477 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-wxxzb" event={"ID":"918fbb0a-6011-4785-8b99-c69ef91af7ef","Type":"ContainerStarted","Data":"2c362567a053341c8f124132043fcefc9054e97965549df80d89d99cf3079607"} Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.492193 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b063a5d7-e03b-46db-85b0-75adab6bca8f-secret-volume\") pod \"collect-profiles-29496255-cdn42\" (UID: \"b063a5d7-e03b-46db-85b0-75adab6bca8f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42" Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.500655 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-fzxq8" event={"ID":"2f2ed0c3-f32f-4402-9e49-3ef2c200c73a","Type":"ContainerStarted","Data":"c1c99dea2d84488538bc4ac5ad6efc0c6f3006f774df5e0b5192cf1f92c40a23"} Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.504658 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkrj5\" (UniqueName: \"kubernetes.io/projected/b063a5d7-e03b-46db-85b0-75adab6bca8f-kube-api-access-zkrj5\") pod \"collect-profiles-29496255-cdn42\" (UID: \"b063a5d7-e03b-46db-85b0-75adab6bca8f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42" Jan 30 12:15:00 crc 
Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.663916 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-84f48565d4-xv5lq"]
Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.675627 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42"
Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.699331 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-p245s"]
Jan 30 12:15:00 crc kubenswrapper[4703]: W0130 12:15:00.725265 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e5cd4fd_45a1_4cdb_9317_35abf01f5c33.slice/crio-d2abf6a652103d8356384c42f575230a4c67744c19386c9c99a2cf8e6dcae1c4 WatchSource:0}: Error finding container d2abf6a652103d8356384c42f575230a4c67744c19386c9c99a2cf8e6dcae1c4: Status 404 returned error can't find the container with id d2abf6a652103d8356384c42f575230a4c67744c19386c9c99a2cf8e6dcae1c4
Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.888044 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7dd968899f-sbcr8"]
Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.902171 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-q26rs"]
Jan 30 12:15:00 crc kubenswrapper[4703]: I0130 12:15:00.909361 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-585dbc889-w9kvh"]
Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.088104 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79"
Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.088197 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79"
Jan 30 12:15:01 crc kubenswrapper[4703]: E0130 12:15:01.088373 4703 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Jan 30 12:15:01 crc kubenswrapper[4703]: E0130 12:15:01.088431 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs podName:9d4e1655-d610-4867-b435-4ec960bb483c nodeName:}" failed. No retries permitted until 2026-01-30 12:15:03.088413066 +0000 UTC m=+1138.866234720 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs") pod "openstack-operator-controller-manager-6987d5d556-cff79" (UID: "9d4e1655-d610-4867-b435-4ec960bb483c") : secret "webhook-server-cert" not found
Jan 30 12:15:01 crc kubenswrapper[4703]: E0130 12:15:01.088806 4703 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Jan 30 12:15:01 crc kubenswrapper[4703]: E0130 12:15:01.088834 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs podName:9d4e1655-d610-4867-b435-4ec960bb483c nodeName:}" failed. No retries permitted until 2026-01-30 12:15:03.088827096 +0000 UTC m=+1138.866648740 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs") pod "openstack-operator-controller-manager-6987d5d556-cff79" (UID: "9d4e1655-d610-4867-b435-4ec960bb483c") : secret "metrics-server-cert" not found
Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.205890 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-68fc8c869-hw7pr"]
Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.266862 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-qzmpv"]
Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.295235 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-zzdzn"]
Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.312832 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-zqh8w"]
Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.322965 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-x2z8q"]
Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.333402 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-64b5b76f97-cwdkz"]
Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.340579 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-55bff696bd-x7cwt"]
Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.390703 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42"]
Jan 30 12:15:01 crc kubenswrapper[4703]: W0130 12:15:01.392277 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbaed05bb_6058_489e_b1ad_424333b94494.slice/crio-0611ab74ca4f45ebe489957e87ea37f77f157e0f225af4ccd3badafe2ef8b5c5 WatchSource:0}: Error finding container 0611ab74ca4f45ebe489957e87ea37f77f157e0f225af4ccd3badafe2ef8b5c5: Status 404 returned error can't find the container with id 0611ab74ca4f45ebe489957e87ea37f77f157e0f225af4ccd3badafe2ef8b5c5
Jan 30 12:15:01 crc kubenswrapper[4703]: W0130 12:15:01.395183 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode0183650_4de1_4a80_a310_3313864fae9b.slice/crio-d7e2fb253a965cf2ff7c0927d2399130fe78fdd74a87f22f8668d868e4ef1685 WatchSource:0}: Error finding container d7e2fb253a965cf2ff7c0927d2399130fe78fdd74a87f22f8668d868e4ef1685: Status 404 returned error can't find the container with id d7e2fb253a965cf2ff7c0927d2399130fe78fdd74a87f22f8668d868e4ef1685
Jan 30 12:15:01 crc kubenswrapper[4703]: W0130 12:15:01.401096 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod425e555c_7283_4169_9293_e380104d38ca.slice/crio-815d1d66f1563ae7c9d9f022cf2c468095f0e3b8f1aec29855ad034cbec65eb1 WatchSource:0}: Error finding container 815d1d66f1563ae7c9d9f022cf2c468095f0e3b8f1aec29855ad034cbec65eb1: Status 404 returned error can't find the container with id 815d1d66f1563ae7c9d9f022cf2c468095f0e3b8f1aec29855ad034cbec65eb1
Jan 30 12:15:01 crc kubenswrapper[4703]: W0130 12:15:01.419958 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod842c0b1e_43ca_49b3_bcc0_a5f9714773ac.slice/crio-c9fdd9472cf3ff7b13d7d2da08bd7e95d83ae97ee370f1f7db9f42a17795c14c WatchSource:0}: Error finding container c9fdd9472cf3ff7b13d7d2da08bd7e95d83ae97ee370f1f7db9f42a17795c14c: Status 404 returned error can't find the container with id c9fdd9472cf3ff7b13d7d2da08bd7e95d83ae97ee370f1f7db9f42a17795c14c
Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.477526 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mfgtf"]
Jan 30 12:15:01 crc kubenswrapper[4703]: E0130 12:15:01.490006 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xkkmz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-mfgtf_openstack-operators(dfa2e6c9-10c2-48ee-8aed-315d5bf5b1c5): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 30 12:15:01 crc kubenswrapper[4703]: E0130 12:15:01.491264 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mfgtf" podUID="dfa2e6c9-10c2-48ee-8aed-315d5bf5b1c5"
Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.494699 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert\") pod \"infra-operator-controller-manager-79955696d6-z2td9\" (UID: \"fd6c3383-802c-4e61-9dba-3f691d8d0fbc\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-z2td9"
Jan 30 12:15:01 crc kubenswrapper[4703]: E0130 12:15:01.494920 4703 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Jan 30 12:15:01 crc kubenswrapper[4703]: E0130 12:15:01.494982 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert podName:fd6c3383-802c-4e61-9dba-3f691d8d0fbc nodeName:}" failed. No retries permitted until 2026-01-30 12:15:05.494959771 +0000 UTC m=+1141.272781415 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert") pod "infra-operator-controller-manager-79955696d6-z2td9" (UID: "fd6c3383-802c-4e61-9dba-3f691d8d0fbc") : secret "infra-operator-webhook-server-cert" not found
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert") pod "infra-operator-controller-manager-79955696d6-z2td9" (UID: "fd6c3383-802c-4e61-9dba-3f691d8d0fbc") : secret "infra-operator-webhook-server-cert" not found Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.505662 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-79c984db64-qk88l"] Jan 30 12:15:01 crc kubenswrapper[4703]: W0130 12:15:01.509191 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod31df4dd4_aab4_4e73_9229_c78a40e82eda.slice/crio-9bcad57a99aab91e6b030b6c05557be4b33bedb65b3daec36e4b51177ae33bef WatchSource:0}: Error finding container 9bcad57a99aab91e6b030b6c05557be4b33bedb65b3daec36e4b51177ae33bef: Status 404 returned error can't find the container with id 9bcad57a99aab91e6b030b6c05557be4b33bedb65b3daec36e4b51177ae33bef Jan 30 12:15:01 crc kubenswrapper[4703]: E0130 12:15:01.515792 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:e6f2f361f1dcbb321407a5884951e16ff96e7b88942b10b548f27ad4de14a0be,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sl2js,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-6687f8d877-nxm9c_openstack-operators(31df4dd4-aab4-4e73-9229-c78a40e82eda): ErrImagePull: pull QPS exceeded" 
logger="UnhandledError" Jan 30 12:15:01 crc kubenswrapper[4703]: E0130 12:15:01.517065 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-nxm9c" podUID="31df4dd4-aab4-4e73-9229-c78a40e82eda" Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.528867 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6687f8d877-nxm9c"] Jan 30 12:15:01 crc kubenswrapper[4703]: W0130 12:15:01.543062 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7f8888ec_800b_41b1_a297_576a28957668.slice/crio-82afe22a9e85bfc5434667b5cc3f97db8220f86c28e281b4a5e0cd292a098ba5 WatchSource:0}: Error finding container 82afe22a9e85bfc5434667b5cc3f97db8220f86c28e281b4a5e0cd292a098ba5: Status 404 returned error can't find the container with id 82afe22a9e85bfc5434667b5cc3f97db8220f86c28e281b4a5e0cd292a098ba5 Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.548809 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-q26rs" event={"ID":"61661318-fcab-41f8-a672-7fe2b6cfa1ce","Type":"ContainerStarted","Data":"d1f400e61bc12e9d173e9d9d27cf0518731d6942ee4926590f4f108de2985e5c"} Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.553097 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-hw7pr" event={"ID":"f1787cd3-2c84-43bc-87dd-5356e44ba9cd","Type":"ContainerStarted","Data":"1acc8f9a6c2317faf3eb43dfbd34ea8d180210736d43f24d8323759590b8db5e"} Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.556705 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-qzmpv" event={"ID":"2d50762e-d50c-4448-bb9c-19136516a622","Type":"ContainerStarted","Data":"129e56b110f7ab0e25112a883720da131358a99df53c64e1f9c5ce070b7c00bb"} Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.563004 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-w9kvh" event={"ID":"a530abd9-3d62-4c65-b9e8-190208cbefd4","Type":"ContainerStarted","Data":"774b04b9668984831bc4d737e3e41ec950236467414c1a4d52026905048f5d7e"} Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.565477 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-nxm9c" event={"ID":"31df4dd4-aab4-4e73-9229-c78a40e82eda","Type":"ContainerStarted","Data":"9bcad57a99aab91e6b030b6c05557be4b33bedb65b3daec36e4b51177ae33bef"} Jan 30 12:15:01 crc kubenswrapper[4703]: E0130 12:15:01.570955 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:e6f2f361f1dcbb321407a5884951e16ff96e7b88942b10b548f27ad4de14a0be\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-nxm9c" podUID="31df4dd4-aab4-4e73-9229-c78a40e82eda" Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.576265 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-x7cwt" 
event={"ID":"e0183650-4de1-4a80-a310-3313864fae9b","Type":"ContainerStarted","Data":"d7e2fb253a965cf2ff7c0927d2399130fe78fdd74a87f22f8668d868e4ef1685"} Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.579861 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42" event={"ID":"b063a5d7-e03b-46db-85b0-75adab6bca8f","Type":"ContainerStarted","Data":"a55e6f30af95bbbaf9dd324bdacb39d732319100a255ab7331ed6f25bef45f7d"} Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.583610 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-x2z8q" event={"ID":"842c0b1e-43ca-49b3-bcc0-a5f9714773ac","Type":"ContainerStarted","Data":"c9fdd9472cf3ff7b13d7d2da08bd7e95d83ae97ee370f1f7db9f42a17795c14c"} Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.628165 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-p245s" event={"ID":"0e5cd4fd-45a1-4cdb-9317-35abf01f5c33","Type":"ContainerStarted","Data":"d2abf6a652103d8356384c42f575230a4c67744c19386c9c99a2cf8e6dcae1c4"} Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.634588 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-sbcr8" event={"ID":"d09ba286-529c-4d2e-b56a-7c8efaff7fec","Type":"ContainerStarted","Data":"32684086d265506e20c198a4b9819a5e21178c7000109f3fbe5ee22ec259a5c1"} Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.641702 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-cwdkz" event={"ID":"425e555c-7283-4169-9293-e380104d38ca","Type":"ContainerStarted","Data":"815d1d66f1563ae7c9d9f022cf2c468095f0e3b8f1aec29855ad034cbec65eb1"} Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.651343 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zzdzn" event={"ID":"bb4211ca-5463-47ec-851d-86f23ff74397","Type":"ContainerStarted","Data":"ae74910065d008618950064717cd934d40bfed5150ccb9aa38b7b61600979c19"} Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.659960 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-xv5lq" event={"ID":"9cc112c6-c3ca-4d9c-ab24-178578e1a41f","Type":"ContainerStarted","Data":"73efe95bb9ad916b787fe1744adc432c5cd3da9bf9cf88a777a9a9f3342cbe7a"} Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.668314 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zqh8w" event={"ID":"baed05bb-6058-489e-b1ad-424333b94494","Type":"ContainerStarted","Data":"0611ab74ca4f45ebe489957e87ea37f77f157e0f225af4ccd3badafe2ef8b5c5"} Jan 30 12:15:01 crc kubenswrapper[4703]: I0130 12:15:01.672106 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mfgtf" event={"ID":"dfa2e6c9-10c2-48ee-8aed-315d5bf5b1c5","Type":"ContainerStarted","Data":"72272ba16db90f550f398f338e6dc336a52dae52da5ab3994175a8d5a34dbeb9"} Jan 30 12:15:01 crc kubenswrapper[4703]: E0130 12:15:01.718194 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mfgtf" podUID="dfa2e6c9-10c2-48ee-8aed-315d5bf5b1c5" Jan 30 12:15:02 crc kubenswrapper[4703]: I0130 12:15:02.117287 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2\" (UID: \"98241c60-d78a-4a93-bfb8-a061e65c7c83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" Jan 30 12:15:02 crc kubenswrapper[4703]: E0130 12:15:02.117515 4703 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 30 12:15:02 crc kubenswrapper[4703]: E0130 12:15:02.118547 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert podName:98241c60-d78a-4a93-bfb8-a061e65c7c83 nodeName:}" failed. No retries permitted until 2026-01-30 12:15:06.11800877 +0000 UTC m=+1141.895830424 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" (UID: "98241c60-d78a-4a93-bfb8-a061e65c7c83") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 30 12:15:02 crc kubenswrapper[4703]: I0130 12:15:02.741991 4703 generic.go:334] "Generic (PLEG): container finished" podID="b063a5d7-e03b-46db-85b0-75adab6bca8f" containerID="57c983168859c3664156a0a6cc0727dedbdc2a5db854d7e49460e92cda694768" exitCode=0 Jan 30 12:15:02 crc kubenswrapper[4703]: I0130 12:15:02.742182 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42" event={"ID":"b063a5d7-e03b-46db-85b0-75adab6bca8f","Type":"ContainerDied","Data":"57c983168859c3664156a0a6cc0727dedbdc2a5db854d7e49460e92cda694768"} Jan 30 12:15:02 crc kubenswrapper[4703]: I0130 12:15:02.746569 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-79c984db64-qk88l" event={"ID":"7f8888ec-800b-41b1-a297-576a28957668","Type":"ContainerStarted","Data":"82afe22a9e85bfc5434667b5cc3f97db8220f86c28e281b4a5e0cd292a098ba5"} Jan 30 12:15:02 crc kubenswrapper[4703]: E0130 12:15:02.751596 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mfgtf" podUID="dfa2e6c9-10c2-48ee-8aed-315d5bf5b1c5" Jan 30 12:15:02 crc kubenswrapper[4703]: E0130 12:15:02.752079 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:e6f2f361f1dcbb321407a5884951e16ff96e7b88942b10b548f27ad4de14a0be\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-nxm9c" podUID="31df4dd4-aab4-4e73-9229-c78a40e82eda" Jan 30 12:15:03 crc 
Jan 30 12:15:03 crc kubenswrapper[4703]: E0130 12:15:03.096731 4703 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Jan 30 12:15:03 crc kubenswrapper[4703]: I0130 12:15:03.096777 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79"
Jan 30 12:15:03 crc kubenswrapper[4703]: E0130 12:15:03.096797 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs podName:9d4e1655-d610-4867-b435-4ec960bb483c nodeName:}" failed. No retries permitted until 2026-01-30 12:15:07.096779546 +0000 UTC m=+1142.874601200 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs") pod "openstack-operator-controller-manager-6987d5d556-cff79" (UID: "9d4e1655-d610-4867-b435-4ec960bb483c") : secret "webhook-server-cert" not found
Jan 30 12:15:03 crc kubenswrapper[4703]: E0130 12:15:03.096903 4703 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Jan 30 12:15:03 crc kubenswrapper[4703]: E0130 12:15:03.096958 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs podName:9d4e1655-d610-4867-b435-4ec960bb483c nodeName:}" failed. No retries permitted until 2026-01-30 12:15:07.096944721 +0000 UTC m=+1142.874766375 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs") pod "openstack-operator-controller-manager-6987d5d556-cff79" (UID: "9d4e1655-d610-4867-b435-4ec960bb483c") : secret "metrics-server-cert" not found
Jan 30 12:15:05 crc kubenswrapper[4703]: I0130 12:15:05.581450 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert\") pod \"infra-operator-controller-manager-79955696d6-z2td9\" (UID: \"fd6c3383-802c-4e61-9dba-3f691d8d0fbc\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-z2td9"
Jan 30 12:15:05 crc kubenswrapper[4703]: E0130 12:15:05.581873 4703 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Jan 30 12:15:05 crc kubenswrapper[4703]: E0130 12:15:05.582217 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert podName:fd6c3383-802c-4e61-9dba-3f691d8d0fbc nodeName:}" failed. No retries permitted until 2026-01-30 12:15:13.582190833 +0000 UTC m=+1149.360012487 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert") pod "infra-operator-controller-manager-79955696d6-z2td9" (UID: "fd6c3383-802c-4e61-9dba-3f691d8d0fbc") : secret "infra-operator-webhook-server-cert" not found
Jan 30 12:15:06 crc kubenswrapper[4703]: I0130 12:15:06.199115 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2\" (UID: \"98241c60-d78a-4a93-bfb8-a061e65c7c83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2"
Jan 30 12:15:06 crc kubenswrapper[4703]: E0130 12:15:06.199377 4703 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 30 12:15:06 crc kubenswrapper[4703]: E0130 12:15:06.200676 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert podName:98241c60-d78a-4a93-bfb8-a061e65c7c83 nodeName:}" failed. No retries permitted until 2026-01-30 12:15:14.200635418 +0000 UTC m=+1149.978457072 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" (UID: "98241c60-d78a-4a93-bfb8-a061e65c7c83") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 30 12:15:07 crc kubenswrapper[4703]: I0130 12:15:07.118663 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79"
Jan 30 12:15:07 crc kubenswrapper[4703]: I0130 12:15:07.118758 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79"
Jan 30 12:15:07 crc kubenswrapper[4703]: E0130 12:15:07.118944 4703 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Jan 30 12:15:07 crc kubenswrapper[4703]: E0130 12:15:07.119038 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs podName:9d4e1655-d610-4867-b435-4ec960bb483c nodeName:}" failed. No retries permitted until 2026-01-30 12:15:15.119016396 +0000 UTC m=+1150.896838050 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs") pod "openstack-operator-controller-manager-6987d5d556-cff79" (UID: "9d4e1655-d610-4867-b435-4ec960bb483c") : secret "webhook-server-cert" not found
Jan 30 12:15:07 crc kubenswrapper[4703]: E0130 12:15:07.119575 4703 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Jan 30 12:15:07 crc kubenswrapper[4703]: E0130 12:15:07.119609 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs podName:9d4e1655-d610-4867-b435-4ec960bb483c nodeName:}" failed. No retries permitted until 2026-01-30 12:15:15.119599213 +0000 UTC m=+1150.897420867 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs") pod "openstack-operator-controller-manager-6987d5d556-cff79" (UID: "9d4e1655-d610-4867-b435-4ec960bb483c") : secret "metrics-server-cert" not found
Jan 30 12:15:07 crc kubenswrapper[4703]: I0130 12:15:07.203885 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42"
Jan 30 12:15:07 crc kubenswrapper[4703]: I0130 12:15:07.324841 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b063a5d7-e03b-46db-85b0-75adab6bca8f-config-volume\") pod \"b063a5d7-e03b-46db-85b0-75adab6bca8f\" (UID: \"b063a5d7-e03b-46db-85b0-75adab6bca8f\") "
Jan 30 12:15:07 crc kubenswrapper[4703]: I0130 12:15:07.325016 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkrj5\" (UniqueName: \"kubernetes.io/projected/b063a5d7-e03b-46db-85b0-75adab6bca8f-kube-api-access-zkrj5\") pod \"b063a5d7-e03b-46db-85b0-75adab6bca8f\" (UID: \"b063a5d7-e03b-46db-85b0-75adab6bca8f\") "
Jan 30 12:15:07 crc kubenswrapper[4703]: I0130 12:15:07.325238 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b063a5d7-e03b-46db-85b0-75adab6bca8f-secret-volume\") pod \"b063a5d7-e03b-46db-85b0-75adab6bca8f\" (UID: \"b063a5d7-e03b-46db-85b0-75adab6bca8f\") "
Jan 30 12:15:07 crc kubenswrapper[4703]: I0130 12:15:07.326578 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b063a5d7-e03b-46db-85b0-75adab6bca8f-config-volume" (OuterVolumeSpecName: "config-volume") pod "b063a5d7-e03b-46db-85b0-75adab6bca8f" (UID: "b063a5d7-e03b-46db-85b0-75adab6bca8f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:15:07 crc kubenswrapper[4703]: I0130 12:15:07.340664 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b063a5d7-e03b-46db-85b0-75adab6bca8f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b063a5d7-e03b-46db-85b0-75adab6bca8f" (UID: "b063a5d7-e03b-46db-85b0-75adab6bca8f"). InnerVolumeSpecName "secret-volume".
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:15:07 crc kubenswrapper[4703]: I0130 12:15:07.341548 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b063a5d7-e03b-46db-85b0-75adab6bca8f-kube-api-access-zkrj5" (OuterVolumeSpecName: "kube-api-access-zkrj5") pod "b063a5d7-e03b-46db-85b0-75adab6bca8f" (UID: "b063a5d7-e03b-46db-85b0-75adab6bca8f"). InnerVolumeSpecName "kube-api-access-zkrj5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:15:07 crc kubenswrapper[4703]: I0130 12:15:07.430568 4703 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b063a5d7-e03b-46db-85b0-75adab6bca8f-config-volume\") on node \"crc\" DevicePath \"\"" Jan 30 12:15:07 crc kubenswrapper[4703]: I0130 12:15:07.430628 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkrj5\" (UniqueName: \"kubernetes.io/projected/b063a5d7-e03b-46db-85b0-75adab6bca8f-kube-api-access-zkrj5\") on node \"crc\" DevicePath \"\"" Jan 30 12:15:07 crc kubenswrapper[4703]: I0130 12:15:07.430645 4703 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b063a5d7-e03b-46db-85b0-75adab6bca8f-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 30 12:15:07 crc kubenswrapper[4703]: I0130 12:15:07.842945 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42" event={"ID":"b063a5d7-e03b-46db-85b0-75adab6bca8f","Type":"ContainerDied","Data":"a55e6f30af95bbbaf9dd324bdacb39d732319100a255ab7331ed6f25bef45f7d"} Jan 30 12:15:07 crc kubenswrapper[4703]: I0130 12:15:07.843009 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a55e6f30af95bbbaf9dd324bdacb39d732319100a255ab7331ed6f25bef45f7d" Jan 30 12:15:07 crc kubenswrapper[4703]: I0130 12:15:07.843080 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42" Jan 30 12:15:13 crc kubenswrapper[4703]: I0130 12:15:13.670507 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert\") pod \"infra-operator-controller-manager-79955696d6-z2td9\" (UID: \"fd6c3383-802c-4e61-9dba-3f691d8d0fbc\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-z2td9" Jan 30 12:15:13 crc kubenswrapper[4703]: I0130 12:15:13.680675 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fd6c3383-802c-4e61-9dba-3f691d8d0fbc-cert\") pod \"infra-operator-controller-manager-79955696d6-z2td9\" (UID: \"fd6c3383-802c-4e61-9dba-3f691d8d0fbc\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-z2td9" Jan 30 12:15:13 crc kubenswrapper[4703]: I0130 12:15:13.807317 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-jz68n" Jan 30 12:15:13 crc kubenswrapper[4703]: I0130 12:15:13.814926 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79955696d6-z2td9" Jan 30 12:15:14 crc kubenswrapper[4703]: I0130 12:15:14.282880 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2\" (UID: \"98241c60-d78a-4a93-bfb8-a061e65c7c83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" Jan 30 12:15:14 crc kubenswrapper[4703]: E0130 12:15:14.283102 4703 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 30 12:15:14 crc kubenswrapper[4703]: E0130 12:15:14.283193 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert podName:98241c60-d78a-4a93-bfb8-a061e65c7c83 nodeName:}" failed. No retries permitted until 2026-01-30 12:15:30.283174154 +0000 UTC m=+1166.060995798 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" (UID: "98241c60-d78a-4a93-bfb8-a061e65c7c83") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 30 12:15:15 crc kubenswrapper[4703]: I0130 12:15:15.200179 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:15:15 crc kubenswrapper[4703]: E0130 12:15:15.200452 4703 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 30 12:15:15 crc kubenswrapper[4703]: I0130 12:15:15.200744 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:15:15 crc kubenswrapper[4703]: E0130 12:15:15.200795 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs podName:9d4e1655-d610-4867-b435-4ec960bb483c nodeName:}" failed. No retries permitted until 2026-01-30 12:15:31.200758991 +0000 UTC m=+1166.978580815 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs") pod "openstack-operator-controller-manager-6987d5d556-cff79" (UID: "9d4e1655-d610-4867-b435-4ec960bb483c") : secret "webhook-server-cert" not found Jan 30 12:15:15 crc kubenswrapper[4703]: E0130 12:15:15.200925 4703 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 30 12:15:15 crc kubenswrapper[4703]: E0130 12:15:15.201005 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs podName:9d4e1655-d610-4867-b435-4ec960bb483c nodeName:}" failed. No retries permitted until 2026-01-30 12:15:31.200983937 +0000 UTC m=+1166.978805591 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs") pod "openstack-operator-controller-manager-6987d5d556-cff79" (UID: "9d4e1655-d610-4867-b435-4ec960bb483c") : secret "metrics-server-cert" not found Jan 30 12:15:20 crc kubenswrapper[4703]: E0130 12:15:20.732237 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:bead175f27e5f074f723694f3b66e5aa7238411bf8a27a267b9a2936e4465521" Jan 30 12:15:20 crc kubenswrapper[4703]: E0130 12:15:20.733525 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:bead175f27e5f074f723694f3b66e5aa7238411bf8a27a267b9a2936e4465521,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qw4dn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-5f4b8bd54d-p245s_openstack-operators(0e5cd4fd-45a1-4cdb-9317-35abf01f5c33): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:15:20 crc kubenswrapper[4703]: E0130 12:15:20.734776 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-p245s" podUID="0e5cd4fd-45a1-4cdb-9317-35abf01f5c33" Jan 30 12:15:21 crc kubenswrapper[4703]: E0130 12:15:21.088717 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ironic-operator@sha256:bead175f27e5f074f723694f3b66e5aa7238411bf8a27a267b9a2936e4465521\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-p245s" podUID="0e5cd4fd-45a1-4cdb-9317-35abf01f5c33" Jan 30 12:15:22 crc kubenswrapper[4703]: E0130 12:15:22.804430 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:27d83ada27cf70cda0c5738f97551d81f1ea4068e83a090f3312e22172d72e10" Jan 30 12:15:22 crc kubenswrapper[4703]: E0130 12:15:22.804963 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:27d83ada27cf70cda0c5738f97551d81f1ea4068e83a090f3312e22172d72e10,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wh6qz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-69d6db494d-fzxq8_openstack-operators(2f2ed0c3-f32f-4402-9e49-3ef2c200c73a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:15:22 crc kubenswrapper[4703]: E0130 12:15:22.806211 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-fzxq8" podUID="2f2ed0c3-f32f-4402-9e49-3ef2c200c73a" Jan 30 12:15:22 crc kubenswrapper[4703]: E0130 12:15:22.987311 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/heat-operator@sha256:27d83ada27cf70cda0c5738f97551d81f1ea4068e83a090f3312e22172d72e10\\\"\"" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-fzxq8" podUID="2f2ed0c3-f32f-4402-9e49-3ef2c200c73a" Jan 30 12:15:23 crc kubenswrapper[4703]: E0130 12:15:23.636912 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:d9f6f8dc6a6dd9b0d7c96e4c89b3056291fd61f11126a1304256a4d6cacd0382" Jan 30 12:15:23 crc kubenswrapper[4703]: E0130 12:15:23.637168 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:d9f6f8dc6a6dd9b0d7c96e4c89b3056291fd61f11126a1304256a4d6cacd0382,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mw9sm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-6d9697b7f4-wxxzb_openstack-operators(918fbb0a-6011-4785-8b99-c69ef91af7ef): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:15:23 crc kubenswrapper[4703]: E0130 12:15:23.638567 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-wxxzb" podUID="918fbb0a-6011-4785-8b99-c69ef91af7ef" Jan 30 12:15:24 crc kubenswrapper[4703]: E0130 12:15:23.996907 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/designate-operator@sha256:d9f6f8dc6a6dd9b0d7c96e4c89b3056291fd61f11126a1304256a4d6cacd0382\\\"\"" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-wxxzb" podUID="918fbb0a-6011-4785-8b99-c69ef91af7ef" Jan 30 12:15:24 crc kubenswrapper[4703]: E0130 12:15:24.452780 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/test-operator@sha256:3e01e99d3ca1b6c20b1bb015b00cfcbffc584f22a93dc6fe4019d63b813c0241" Jan 30 12:15:24 crc kubenswrapper[4703]: E0130 12:15:24.453069 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:3e01e99d3ca1b6c20b1bb015b00cfcbffc584f22a93dc6fe4019d63b813c0241,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hlcdz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-56f8bfcd9f-x2z8q_openstack-operators(842c0b1e-43ca-49b3-bcc0-a5f9714773ac): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:15:24 crc kubenswrapper[4703]: E0130 12:15:24.454385 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-x2z8q" podUID="842c0b1e-43ca-49b3-bcc0-a5f9714773ac" Jan 30 12:15:25 crc kubenswrapper[4703]: E0130 12:15:25.005696 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:3e01e99d3ca1b6c20b1bb015b00cfcbffc584f22a93dc6fe4019d63b813c0241\\\"\"" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-x2z8q" podUID="842c0b1e-43ca-49b3-bcc0-a5f9714773ac" Jan 30 12:15:25 crc kubenswrapper[4703]: E0130 12:15:25.137684 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:42ad717de1b82267d244b016e5491a5b66a5c3deb6b8c2906a379e1296a2c382" Jan 30 12:15:25 crc kubenswrapper[4703]: E0130 12:15:25.138636 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:42ad717de1b82267d244b016e5491a5b66a5c3deb6b8c2906a379e1296a2c382,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rpvdl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-68fc8c869-hw7pr_openstack-operators(f1787cd3-2c84-43bc-87dd-5356e44ba9cd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:15:25 crc kubenswrapper[4703]: E0130 12:15:25.140066 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-hw7pr" podUID="f1787cd3-2c84-43bc-87dd-5356e44ba9cd" Jan 30 12:15:25 crc kubenswrapper[4703]: E0130 12:15:25.664712 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:bbb46b8b3b69fdfad7bafc10a7e88f6ea58bcdc3c91e30beb79e24417d52e0f6" Jan 30 12:15:25 crc kubenswrapper[4703]: E0130 12:15:25.665038 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:bbb46b8b3b69fdfad7bafc10a7e88f6ea58bcdc3c91e30beb79e24417d52e0f6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-phqzr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-585dbc889-w9kvh_openstack-operators(a530abd9-3d62-4c65-b9e8-190208cbefd4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:15:25 crc kubenswrapper[4703]: E0130 12:15:25.666312 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-w9kvh" podUID="a530abd9-3d62-4c65-b9e8-190208cbefd4" Jan 30 12:15:26 crc kubenswrapper[4703]: E0130 12:15:26.013436 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:42ad717de1b82267d244b016e5491a5b66a5c3deb6b8c2906a379e1296a2c382\\\"\"" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-hw7pr" podUID="f1787cd3-2c84-43bc-87dd-5356e44ba9cd" Jan 30 12:15:26 crc kubenswrapper[4703]: E0130 12:15:26.013614 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:bbb46b8b3b69fdfad7bafc10a7e88f6ea58bcdc3c91e30beb79e24417d52e0f6\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-w9kvh" 
podUID="a530abd9-3d62-4c65-b9e8-190208cbefd4" Jan 30 12:15:26 crc kubenswrapper[4703]: E0130 12:15:26.245519 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:e0824d5d461ada59715eb3048ed9394c80abba09c45503f8f90ee3b34e525488" Jan 30 12:15:26 crc kubenswrapper[4703]: E0130 12:15:26.245770 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:e0824d5d461ada59715eb3048ed9394c80abba09c45503f8f90ee3b34e525488,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-k9s5g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5b964cf4cd-qzmpv_openstack-operators(2d50762e-d50c-4448-bb9c-19136516a622): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:15:26 crc kubenswrapper[4703]: E0130 12:15:26.247053 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-qzmpv" podUID="2d50762e-d50c-4448-bb9c-19136516a622" Jan 30 12:15:27 crc kubenswrapper[4703]: E0130 12:15:27.018625 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: 
\"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:e0824d5d461ada59715eb3048ed9394c80abba09c45503f8f90ee3b34e525488\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-qzmpv" podUID="2d50762e-d50c-4448-bb9c-19136516a622" Jan 30 12:15:27 crc kubenswrapper[4703]: E0130 12:15:27.179895 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:379470e2752f286e73908e94233e884922b231169a5521a59f53843a2dc3184c" Jan 30 12:15:27 crc kubenswrapper[4703]: E0130 12:15:27.180271 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:379470e2752f286e73908e94233e884922b231169a5521a59f53843a2dc3184c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-t8vd4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-7b6c4d8c5f-2l6qz_openstack-operators(7434f3b6-d77f-48b7-8ceb-f084a9c283f3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:15:27 crc kubenswrapper[4703]: E0130 12:15:27.181628 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-2l6qz" 
podUID="7434f3b6-d77f-48b7-8ceb-f084a9c283f3" Jan 30 12:15:27 crc kubenswrapper[4703]: E0130 12:15:27.802807 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:1f593e8d49d02b6484c89632192ae54771675c54fbd8426e3675b8e20ecfd7c4" Jan 30 12:15:27 crc kubenswrapper[4703]: E0130 12:15:27.803666 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:1f593e8d49d02b6484c89632192ae54771675c54fbd8426e3675b8e20ecfd7c4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xrp9j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-8886f4c47-vb4kb_openstack-operators(a634d819-9927-4b82-8ec1-959ca5f19908): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:15:27 crc kubenswrapper[4703]: E0130 12:15:27.804857 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-vb4kb" podUID="a634d819-9927-4b82-8ec1-959ca5f19908" Jan 30 12:15:27 crc kubenswrapper[4703]: E0130 12:15:27.907321 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="38.129.56.193:5001/openstack-k8s-operators/watcher-operator:5719ab92fbb1460dda1b70b3edc9c668644816a6" Jan 30 12:15:27 crc kubenswrapper[4703]: E0130 12:15:27.907428 4703 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.129.56.193:5001/openstack-k8s-operators/watcher-operator:5719ab92fbb1460dda1b70b3edc9c668644816a6" Jan 30 12:15:27 crc kubenswrapper[4703]: E0130 12:15:27.907645 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.129.56.193:5001/openstack-k8s-operators/watcher-operator:5719ab92fbb1460dda1b70b3edc9c668644816a6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tg4jh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-79c984db64-qk88l_openstack-operators(7f8888ec-800b-41b1-a297-576a28957668): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:15:27 crc kubenswrapper[4703]: E0130 12:15:27.908869 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/watcher-operator-controller-manager-79c984db64-qk88l" podUID="7f8888ec-800b-41b1-a297-576a28957668" Jan 30 12:15:28 crc kubenswrapper[4703]: E0130 12:15:28.025533 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with 
ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:1f593e8d49d02b6484c89632192ae54771675c54fbd8426e3675b8e20ecfd7c4\\\"\"" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-vb4kb" podUID="a634d819-9927-4b82-8ec1-959ca5f19908" Jan 30 12:15:28 crc kubenswrapper[4703]: E0130 12:15:28.030036 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/barbican-operator@sha256:379470e2752f286e73908e94233e884922b231169a5521a59f53843a2dc3184c\\\"\"" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-2l6qz" podUID="7434f3b6-d77f-48b7-8ceb-f084a9c283f3" Jan 30 12:15:28 crc kubenswrapper[4703]: E0130 12:15:28.030558 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.129.56.193:5001/openstack-k8s-operators/watcher-operator:5719ab92fbb1460dda1b70b3edc9c668644816a6\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-79c984db64-qk88l" podUID="7f8888ec-800b-41b1-a297-576a28957668" Jan 30 12:15:28 crc kubenswrapper[4703]: E0130 12:15:28.557798 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:319c969e88f109b26487a9f5a67203682803d7386424703ab7ca0340be99ae17" Jan 30 12:15:28 crc kubenswrapper[4703]: E0130 12:15:28.558109 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:319c969e88f109b26487a9f5a67203682803d7386424703ab7ca0340be99ae17,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nq8mh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-84f48565d4-xv5lq_openstack-operators(9cc112c6-c3ca-4d9c-ab24-178578e1a41f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:15:28 crc kubenswrapper[4703]: E0130 12:15:28.559620 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-xv5lq" podUID="9cc112c6-c3ca-4d9c-ab24-178578e1a41f" Jan 30 12:15:29 crc kubenswrapper[4703]: E0130 12:15:29.047681 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:319c969e88f109b26487a9f5a67203682803d7386424703ab7ca0340be99ae17\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-xv5lq" podUID="9cc112c6-c3ca-4d9c-ab24-178578e1a41f" Jan 30 12:15:30 crc kubenswrapper[4703]: I0130 12:15:30.327419 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2\" (UID: \"98241c60-d78a-4a93-bfb8-a061e65c7c83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" Jan 30 12:15:30 crc kubenswrapper[4703]: I0130 12:15:30.337846 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/98241c60-d78a-4a93-bfb8-a061e65c7c83-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2\" (UID: \"98241c60-d78a-4a93-bfb8-a061e65c7c83\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" Jan 30 12:15:30 crc kubenswrapper[4703]: I0130 12:15:30.442374 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-bzdlb" Jan 30 12:15:30 crc kubenswrapper[4703]: I0130 12:15:30.449829 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" Jan 30 12:15:31 crc kubenswrapper[4703]: I0130 12:15:31.246881 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:15:31 crc kubenswrapper[4703]: I0130 12:15:31.247792 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:15:31 crc kubenswrapper[4703]: I0130 12:15:31.261051 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-webhook-certs\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:15:31 crc kubenswrapper[4703]: I0130 12:15:31.262979 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d4e1655-d610-4867-b435-4ec960bb483c-metrics-certs\") pod \"openstack-operator-controller-manager-6987d5d556-cff79\" (UID: \"9d4e1655-d610-4867-b435-4ec960bb483c\") " pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:15:31 crc kubenswrapper[4703]: I0130 12:15:31.350024 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-z2td9"] Jan 30 12:15:31 crc kubenswrapper[4703]: W0130 12:15:31.369981 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd6c3383_802c_4e61_9dba_3f691d8d0fbc.slice/crio-573efe47e74ecfd8aee145da07ed2a465e482242f2436525cd7e4eec8f3d7bcc WatchSource:0}: Error finding container 573efe47e74ecfd8aee145da07ed2a465e482242f2436525cd7e4eec8f3d7bcc: Status 404 returned error can't find the container with id 573efe47e74ecfd8aee145da07ed2a465e482242f2436525cd7e4eec8f3d7bcc Jan 30 12:15:31 crc kubenswrapper[4703]: I0130 12:15:31.456273 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2"] Jan 30 12:15:31 crc kubenswrapper[4703]: I0130 12:15:31.462015 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-q6xxw" Jan 30 12:15:31 crc kubenswrapper[4703]: I0130 12:15:31.468593 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:15:31 crc kubenswrapper[4703]: W0130 12:15:31.486409 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98241c60_d78a_4a93_bfb8_a061e65c7c83.slice/crio-6720a63d48b38d46371931f11f00016806bbd5b894d2e0a5a0f2926e7474b343 WatchSource:0}: Error finding container 6720a63d48b38d46371931f11f00016806bbd5b894d2e0a5a0f2926e7474b343: Status 404 returned error can't find the container with id 6720a63d48b38d46371931f11f00016806bbd5b894d2e0a5a0f2926e7474b343 Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.094292 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zzdzn" event={"ID":"bb4211ca-5463-47ec-851d-86f23ff74397","Type":"ContainerStarted","Data":"8182ecc3b2bf449929aa3ebc4ce63c21e59ca1aa4e8b91034ee1a1c5cd5ee664"} Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.094861 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zzdzn" Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.100676 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79955696d6-z2td9" event={"ID":"fd6c3383-802c-4e61-9dba-3f691d8d0fbc","Type":"ContainerStarted","Data":"573efe47e74ecfd8aee145da07ed2a465e482242f2436525cd7e4eec8f3d7bcc"} Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.104466 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" event={"ID":"98241c60-d78a-4a93-bfb8-a061e65c7c83","Type":"ContainerStarted","Data":"6720a63d48b38d46371931f11f00016806bbd5b894d2e0a5a0f2926e7474b343"} Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.106257 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zqh8w" event={"ID":"baed05bb-6058-489e-b1ad-424333b94494","Type":"ContainerStarted","Data":"0a0a43b92219870bb92e5e449cb5bd4348502bd975ccc4a48706c675c4d4302e"} Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.106623 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zqh8w" Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.120315 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pdt6v" event={"ID":"6219460f-6432-4743-984d-e3c0ce8d4538","Type":"ContainerStarted","Data":"9d0600efd374d97ff371bac9a5850d5e7845c2786221edf0276459835eeb98be"} Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.121321 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pdt6v" Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.122770 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zzdzn" podStartSLOduration=7.18264734 podStartE2EDuration="35.122741379s" podCreationTimestamp="2026-01-30 12:14:57 +0000 UTC" firstStartedPulling="2026-01-30 12:15:01.325282248 +0000 UTC m=+1137.103103902" lastFinishedPulling="2026-01-30 12:15:29.265376287 +0000 UTC m=+1165.043197941" observedRunningTime="2026-01-30 
12:15:32.119658626 +0000 UTC m=+1167.897480290" watchObservedRunningTime="2026-01-30 12:15:32.122741379 +0000 UTC m=+1167.900563033" Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.126216 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-x7cwt" event={"ID":"e0183650-4de1-4a80-a310-3313864fae9b","Type":"ContainerStarted","Data":"8c719f11e7d8a66ab411907c6bb4d431c77cfb6100219e4154ea1792a89f8f40"} Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.127262 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-x7cwt" Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.130971 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-cwdkz" event={"ID":"425e555c-7283-4169-9293-e380104d38ca","Type":"ContainerStarted","Data":"9fa63b44ce1b6784ce2532771aa5a26d1adc0af75a7cf4093f997addbe8cf62a"} Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.131947 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-cwdkz" Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.134452 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-sbcr8" event={"ID":"d09ba286-529c-4d2e-b56a-7c8efaff7fec","Type":"ContainerStarted","Data":"27cb8647603295ad9bb9be844c786f4b081a0cbf9a4bcbc8b9e94c17b96076f5"} Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.135034 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-sbcr8" Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.137111 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-q26rs" event={"ID":"61661318-fcab-41f8-a672-7fe2b6cfa1ce","Type":"ContainerStarted","Data":"c6670bcdfc7d92d4d4b857937a40facc3341e469dc62bc1c2c97805084781265"} Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.137828 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-q26rs" Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.171227 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mfgtf" event={"ID":"dfa2e6c9-10c2-48ee-8aed-315d5bf5b1c5","Type":"ContainerStarted","Data":"32d9d96558d7a496f5c5d1e5c320b5508b4873f04cedc0e1124de20822a6e0f7"} Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.183724 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-nxm9c" event={"ID":"31df4dd4-aab4-4e73-9229-c78a40e82eda","Type":"ContainerStarted","Data":"18a4b91b8e2cea6bfb8e80d9f40d6edcace63d247a54ca23f11c5d4388e3ee73"} Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.184802 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-nxm9c" Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.185333 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-cwdkz" podStartSLOduration=6.333231055 
podStartE2EDuration="34.185313513s" podCreationTimestamp="2026-01-30 12:14:58 +0000 UTC" firstStartedPulling="2026-01-30 12:15:01.407984437 +0000 UTC m=+1137.185806081" lastFinishedPulling="2026-01-30 12:15:29.260066885 +0000 UTC m=+1165.037888539" observedRunningTime="2026-01-30 12:15:32.185114418 +0000 UTC m=+1167.962936062" watchObservedRunningTime="2026-01-30 12:15:32.185313513 +0000 UTC m=+1167.963135187" Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.185853 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zqh8w" podStartSLOduration=7.320863172 podStartE2EDuration="35.185844367s" podCreationTimestamp="2026-01-30 12:14:57 +0000 UTC" firstStartedPulling="2026-01-30 12:15:01.395644894 +0000 UTC m=+1137.173466548" lastFinishedPulling="2026-01-30 12:15:29.260626089 +0000 UTC m=+1165.038447743" observedRunningTime="2026-01-30 12:15:32.150679511 +0000 UTC m=+1167.928501265" watchObservedRunningTime="2026-01-30 12:15:32.185844367 +0000 UTC m=+1167.963666021" Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.248178 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-sbcr8" podStartSLOduration=6.905525928 podStartE2EDuration="35.248144174s" podCreationTimestamp="2026-01-30 12:14:57 +0000 UTC" firstStartedPulling="2026-01-30 12:15:00.918112346 +0000 UTC m=+1136.695934000" lastFinishedPulling="2026-01-30 12:15:29.260730592 +0000 UTC m=+1165.038552246" observedRunningTime="2026-01-30 12:15:32.237690942 +0000 UTC m=+1168.015512596" watchObservedRunningTime="2026-01-30 12:15:32.248144174 +0000 UTC m=+1168.025965838" Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.250211 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79"] Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.298402 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-x7cwt" podStartSLOduration=7.441741627 podStartE2EDuration="35.298346175s" podCreationTimestamp="2026-01-30 12:14:57 +0000 UTC" firstStartedPulling="2026-01-30 12:15:01.404195566 +0000 UTC m=+1137.182017220" lastFinishedPulling="2026-01-30 12:15:29.260800114 +0000 UTC m=+1165.038621768" observedRunningTime="2026-01-30 12:15:32.278707327 +0000 UTC m=+1168.056528981" watchObservedRunningTime="2026-01-30 12:15:32.298346175 +0000 UTC m=+1168.076167829" Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.324648 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pdt6v" podStartSLOduration=5.055797649 podStartE2EDuration="35.324614671s" podCreationTimestamp="2026-01-30 12:14:57 +0000 UTC" firstStartedPulling="2026-01-30 12:14:58.99191541 +0000 UTC m=+1134.769737064" lastFinishedPulling="2026-01-30 12:15:29.260732442 +0000 UTC m=+1165.038554086" observedRunningTime="2026-01-30 12:15:32.306554045 +0000 UTC m=+1168.084375699" watchObservedRunningTime="2026-01-30 12:15:32.324614671 +0000 UTC m=+1168.102436325" Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.338595 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-q26rs" podStartSLOduration=7.052867915 podStartE2EDuration="35.338564877s" 
podCreationTimestamp="2026-01-30 12:14:57 +0000 UTC" firstStartedPulling="2026-01-30 12:15:00.979632354 +0000 UTC m=+1136.757454018" lastFinishedPulling="2026-01-30 12:15:29.265329326 +0000 UTC m=+1165.043150980" observedRunningTime="2026-01-30 12:15:32.338060544 +0000 UTC m=+1168.115882218" watchObservedRunningTime="2026-01-30 12:15:32.338564877 +0000 UTC m=+1168.116386531" Jan 30 12:15:32 crc kubenswrapper[4703]: I0130 12:15:32.387424 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-nxm9c" podStartSLOduration=5.847401675 podStartE2EDuration="35.387396971s" podCreationTimestamp="2026-01-30 12:14:57 +0000 UTC" firstStartedPulling="2026-01-30 12:15:01.515593187 +0000 UTC m=+1137.293414841" lastFinishedPulling="2026-01-30 12:15:31.055588483 +0000 UTC m=+1166.833410137" observedRunningTime="2026-01-30 12:15:32.380178967 +0000 UTC m=+1168.158000631" watchObservedRunningTime="2026-01-30 12:15:32.387396971 +0000 UTC m=+1168.165218625" Jan 30 12:15:33 crc kubenswrapper[4703]: I0130 12:15:33.210268 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" event={"ID":"9d4e1655-d610-4867-b435-4ec960bb483c","Type":"ContainerStarted","Data":"eef1877125b99d996345eabab179444ddb6e8cd3ffcc0359056ba5ecc03bb2b8"} Jan 30 12:15:33 crc kubenswrapper[4703]: I0130 12:15:33.210350 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" event={"ID":"9d4e1655-d610-4867-b435-4ec960bb483c","Type":"ContainerStarted","Data":"d227b50c06b32f3ac1a81d8bce83f653634318590077e71cecc8ad8acc6b7e58"} Jan 30 12:15:33 crc kubenswrapper[4703]: I0130 12:15:33.295030 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-mfgtf" podStartSLOduration=4.5597963230000005 podStartE2EDuration="34.294994873s" podCreationTimestamp="2026-01-30 12:14:59 +0000 UTC" firstStartedPulling="2026-01-30 12:15:01.489797932 +0000 UTC m=+1137.267619586" lastFinishedPulling="2026-01-30 12:15:31.224996482 +0000 UTC m=+1167.002818136" observedRunningTime="2026-01-30 12:15:32.411724936 +0000 UTC m=+1168.189546580" watchObservedRunningTime="2026-01-30 12:15:33.294994873 +0000 UTC m=+1169.072816527" Jan 30 12:15:34 crc kubenswrapper[4703]: I0130 12:15:34.121501 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" podStartSLOduration=36.121477434 podStartE2EDuration="36.121477434s" podCreationTimestamp="2026-01-30 12:14:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:15:33.300176943 +0000 UTC m=+1169.077998587" watchObservedRunningTime="2026-01-30 12:15:34.121477434 +0000 UTC m=+1169.899299078" Jan 30 12:15:34 crc kubenswrapper[4703]: I0130 12:15:34.251373 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:15:36 crc kubenswrapper[4703]: I0130 12:15:36.271536 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" 
event={"ID":"98241c60-d78a-4a93-bfb8-a061e65c7c83","Type":"ContainerStarted","Data":"1feba9550ffc135491e8b5783dcdee87119ddf29a18cdc3a55583987766b9e8c"} Jan 30 12:15:36 crc kubenswrapper[4703]: I0130 12:15:36.272252 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" Jan 30 12:15:36 crc kubenswrapper[4703]: I0130 12:15:36.273715 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-wxxzb" event={"ID":"918fbb0a-6011-4785-8b99-c69ef91af7ef","Type":"ContainerStarted","Data":"8e1a9ec2d437ea69be6d017331ff2044fc32ab9cf54fd9b4c2aba1f2331f4492"} Jan 30 12:15:36 crc kubenswrapper[4703]: I0130 12:15:36.273965 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-wxxzb" Jan 30 12:15:36 crc kubenswrapper[4703]: I0130 12:15:36.276281 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79955696d6-z2td9" event={"ID":"fd6c3383-802c-4e61-9dba-3f691d8d0fbc","Type":"ContainerStarted","Data":"2197f161323bd5bac07443c71f6e500bfa762caf704b5f3aa5592c34e65d77bf"} Jan 30 12:15:36 crc kubenswrapper[4703]: I0130 12:15:36.277082 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-79955696d6-z2td9" Jan 30 12:15:36 crc kubenswrapper[4703]: I0130 12:15:36.304791 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" podStartSLOduration=34.791084015 podStartE2EDuration="39.304759064s" podCreationTimestamp="2026-01-30 12:14:57 +0000 UTC" firstStartedPulling="2026-01-30 12:15:31.491248227 +0000 UTC m=+1167.269069881" lastFinishedPulling="2026-01-30 12:15:36.004923276 +0000 UTC m=+1171.782744930" observedRunningTime="2026-01-30 12:15:36.299354149 +0000 UTC m=+1172.077175803" watchObservedRunningTime="2026-01-30 12:15:36.304759064 +0000 UTC m=+1172.082580718" Jan 30 12:15:36 crc kubenswrapper[4703]: I0130 12:15:36.355904 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-wxxzb" podStartSLOduration=3.700319313 podStartE2EDuration="39.355874889s" podCreationTimestamp="2026-01-30 12:14:57 +0000 UTC" firstStartedPulling="2026-01-30 12:15:00.34862809 +0000 UTC m=+1136.126449744" lastFinishedPulling="2026-01-30 12:15:36.004183666 +0000 UTC m=+1171.782005320" observedRunningTime="2026-01-30 12:15:36.331219187 +0000 UTC m=+1172.109040841" watchObservedRunningTime="2026-01-30 12:15:36.355874889 +0000 UTC m=+1172.133696533" Jan 30 12:15:36 crc kubenswrapper[4703]: I0130 12:15:36.361933 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-79955696d6-z2td9" podStartSLOduration=34.774365195 podStartE2EDuration="39.361906212s" podCreationTimestamp="2026-01-30 12:14:57 +0000 UTC" firstStartedPulling="2026-01-30 12:15:31.37951552 +0000 UTC m=+1167.157337174" lastFinishedPulling="2026-01-30 12:15:35.967056537 +0000 UTC m=+1171.744878191" observedRunningTime="2026-01-30 12:15:36.356058805 +0000 UTC m=+1172.133880459" watchObservedRunningTime="2026-01-30 12:15:36.361906212 +0000 UTC m=+1172.139727866" Jan 30 12:15:37 crc kubenswrapper[4703]: I0130 12:15:37.286722 
4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-p245s" event={"ID":"0e5cd4fd-45a1-4cdb-9317-35abf01f5c33","Type":"ContainerStarted","Data":"cbb8341a991d25e94a44ef31b96863a83727dfb8f6e8673cb1c324ff30cf43b5"} Jan 30 12:15:37 crc kubenswrapper[4703]: I0130 12:15:37.287041 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-p245s" Jan 30 12:15:37 crc kubenswrapper[4703]: I0130 12:15:37.289565 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-x2z8q" event={"ID":"842c0b1e-43ca-49b3-bcc0-a5f9714773ac","Type":"ContainerStarted","Data":"eb454c5164280748052f47d0dcff361b725ab8f10e116befa2fd48c02c81fd8d"} Jan 30 12:15:37 crc kubenswrapper[4703]: I0130 12:15:37.311239 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-p245s" podStartSLOduration=4.471447317 podStartE2EDuration="40.311211427s" podCreationTimestamp="2026-01-30 12:14:57 +0000 UTC" firstStartedPulling="2026-01-30 12:15:00.730471279 +0000 UTC m=+1136.508292933" lastFinishedPulling="2026-01-30 12:15:36.570235389 +0000 UTC m=+1172.348057043" observedRunningTime="2026-01-30 12:15:37.306351526 +0000 UTC m=+1173.084173180" watchObservedRunningTime="2026-01-30 12:15:37.311211427 +0000 UTC m=+1173.089033081" Jan 30 12:15:37 crc kubenswrapper[4703]: I0130 12:15:37.329748 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-x2z8q" podStartSLOduration=4.184054572 podStartE2EDuration="39.329715795s" podCreationTimestamp="2026-01-30 12:14:58 +0000 UTC" firstStartedPulling="2026-01-30 12:15:01.425734276 +0000 UTC m=+1137.203555930" lastFinishedPulling="2026-01-30 12:15:36.571395499 +0000 UTC m=+1172.349217153" observedRunningTime="2026-01-30 12:15:37.326306654 +0000 UTC m=+1173.104128308" watchObservedRunningTime="2026-01-30 12:15:37.329715795 +0000 UTC m=+1173.107537439" Jan 30 12:15:37 crc kubenswrapper[4703]: I0130 12:15:37.707726 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-8d874c8fc-pdt6v" Jan 30 12:15:37 crc kubenswrapper[4703]: I0130 12:15:37.924915 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-q26rs" Jan 30 12:15:38 crc kubenswrapper[4703]: I0130 12:15:38.331437 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-7dd968899f-sbcr8" Jan 30 12:15:38 crc kubenswrapper[4703]: I0130 12:15:38.333333 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-zzdzn" Jan 30 12:15:38 crc kubenswrapper[4703]: I0130 12:15:38.446244 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-6687f8d877-nxm9c" Jan 30 12:15:38 crc kubenswrapper[4703]: I0130 12:15:38.486458 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-55bff696bd-x7cwt" Jan 30 12:15:38 crc kubenswrapper[4703]: I0130 12:15:38.747765 4703 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-zqh8w" Jan 30 12:15:39 crc kubenswrapper[4703]: I0130 12:15:39.138200 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-64b5b76f97-cwdkz" Jan 30 12:15:39 crc kubenswrapper[4703]: I0130 12:15:39.153288 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-x2z8q" Jan 30 12:15:39 crc kubenswrapper[4703]: I0130 12:15:39.315265 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-qzmpv" event={"ID":"2d50762e-d50c-4448-bb9c-19136516a622","Type":"ContainerStarted","Data":"277dbd0d160baeab3f9f5cf7eb6c6c143114441cdf189f47cbfe51ef1270f44d"} Jan 30 12:15:39 crc kubenswrapper[4703]: I0130 12:15:39.316619 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-qzmpv" Jan 30 12:15:39 crc kubenswrapper[4703]: I0130 12:15:39.318644 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-fzxq8" event={"ID":"2f2ed0c3-f32f-4402-9e49-3ef2c200c73a","Type":"ContainerStarted","Data":"992f85b34c07c16a30bca3d5d304040588b2d455182a56e613a11c7eb3f8d47b"} Jan 30 12:15:39 crc kubenswrapper[4703]: I0130 12:15:39.319229 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-fzxq8" Jan 30 12:15:39 crc kubenswrapper[4703]: I0130 12:15:39.321035 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-hw7pr" event={"ID":"f1787cd3-2c84-43bc-87dd-5356e44ba9cd","Type":"ContainerStarted","Data":"7173bc565e66a4181c893d451efb6688b8032426b8f90ba2092ee22ac6045795"} Jan 30 12:15:39 crc kubenswrapper[4703]: I0130 12:15:39.321518 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-hw7pr" Jan 30 12:15:39 crc kubenswrapper[4703]: I0130 12:15:39.337565 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-qzmpv" podStartSLOduration=5.046867545 podStartE2EDuration="42.337537794s" podCreationTimestamp="2026-01-30 12:14:57 +0000 UTC" firstStartedPulling="2026-01-30 12:15:01.300954033 +0000 UTC m=+1137.078775677" lastFinishedPulling="2026-01-30 12:15:38.591624272 +0000 UTC m=+1174.369445926" observedRunningTime="2026-01-30 12:15:39.334516953 +0000 UTC m=+1175.112338617" watchObservedRunningTime="2026-01-30 12:15:39.337537794 +0000 UTC m=+1175.115359448" Jan 30 12:15:39 crc kubenswrapper[4703]: I0130 12:15:39.362440 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-fzxq8" podStartSLOduration=4.163242122 podStartE2EDuration="42.362398533s" podCreationTimestamp="2026-01-30 12:14:57 +0000 UTC" firstStartedPulling="2026-01-30 12:15:00.392503632 +0000 UTC m=+1136.170325286" lastFinishedPulling="2026-01-30 12:15:38.591660043 +0000 UTC m=+1174.369481697" observedRunningTime="2026-01-30 12:15:39.358867018 +0000 UTC m=+1175.136688672" watchObservedRunningTime="2026-01-30 12:15:39.362398533 +0000 UTC m=+1175.140220187" Jan 30 12:15:39 crc 
kubenswrapper[4703]: I0130 12:15:39.395004 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-hw7pr" podStartSLOduration=4.104723322 podStartE2EDuration="41.39498175s" podCreationTimestamp="2026-01-30 12:14:58 +0000 UTC" firstStartedPulling="2026-01-30 12:15:01.297155351 +0000 UTC m=+1137.074977005" lastFinishedPulling="2026-01-30 12:15:38.587413779 +0000 UTC m=+1174.365235433" observedRunningTime="2026-01-30 12:15:39.391077235 +0000 UTC m=+1175.168898899" watchObservedRunningTime="2026-01-30 12:15:39.39498175 +0000 UTC m=+1175.172803404" Jan 30 12:15:41 crc kubenswrapper[4703]: I0130 12:15:41.340337 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-w9kvh" event={"ID":"a530abd9-3d62-4c65-b9e8-190208cbefd4","Type":"ContainerStarted","Data":"501a57a0205a678724509d819906270222324f73d3725b7284693b821739355b"} Jan 30 12:15:41 crc kubenswrapper[4703]: I0130 12:15:41.341129 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-w9kvh" Jan 30 12:15:41 crc kubenswrapper[4703]: I0130 12:15:41.366813 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-w9kvh" podStartSLOduration=4.943682944 podStartE2EDuration="44.366789089s" podCreationTimestamp="2026-01-30 12:14:57 +0000 UTC" firstStartedPulling="2026-01-30 12:15:00.951767473 +0000 UTC m=+1136.729589127" lastFinishedPulling="2026-01-30 12:15:40.374873618 +0000 UTC m=+1176.152695272" observedRunningTime="2026-01-30 12:15:41.361767525 +0000 UTC m=+1177.139589179" watchObservedRunningTime="2026-01-30 12:15:41.366789089 +0000 UTC m=+1177.144610743" Jan 30 12:15:41 crc kubenswrapper[4703]: I0130 12:15:41.477087 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-6987d5d556-cff79" Jan 30 12:15:42 crc kubenswrapper[4703]: I0130 12:15:42.352086 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-xv5lq" event={"ID":"9cc112c6-c3ca-4d9c-ab24-178578e1a41f","Type":"ContainerStarted","Data":"2dec8f703c69341498291c97d0da11b52ac3a6a8438e492042d5c471b9af352e"} Jan 30 12:15:42 crc kubenswrapper[4703]: I0130 12:15:42.352920 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-xv5lq" Jan 30 12:15:42 crc kubenswrapper[4703]: I0130 12:15:42.378564 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-xv5lq" podStartSLOduration=4.387528861 podStartE2EDuration="45.378523075s" podCreationTimestamp="2026-01-30 12:14:57 +0000 UTC" firstStartedPulling="2026-01-30 12:15:00.715464516 +0000 UTC m=+1136.493286170" lastFinishedPulling="2026-01-30 12:15:41.70645873 +0000 UTC m=+1177.484280384" observedRunningTime="2026-01-30 12:15:42.372185934 +0000 UTC m=+1178.150007588" watchObservedRunningTime="2026-01-30 12:15:42.378523075 +0000 UTC m=+1178.156344729" Jan 30 12:15:43 crc kubenswrapper[4703]: I0130 12:15:43.370425 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-2l6qz" 
event={"ID":"7434f3b6-d77f-48b7-8ceb-f084a9c283f3","Type":"ContainerStarted","Data":"d206be5b7813de3d9760f023afe92a1f044f2a45f370cc5718a4af260cb37153"} Jan 30 12:15:43 crc kubenswrapper[4703]: I0130 12:15:43.371573 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-2l6qz" Jan 30 12:15:43 crc kubenswrapper[4703]: I0130 12:15:43.398909 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-2l6qz" podStartSLOduration=3.613226478 podStartE2EDuration="46.398878902s" podCreationTimestamp="2026-01-30 12:14:57 +0000 UTC" firstStartedPulling="2026-01-30 12:14:59.774365035 +0000 UTC m=+1135.552186689" lastFinishedPulling="2026-01-30 12:15:42.560017459 +0000 UTC m=+1178.337839113" observedRunningTime="2026-01-30 12:15:43.387709631 +0000 UTC m=+1179.165531285" watchObservedRunningTime="2026-01-30 12:15:43.398878902 +0000 UTC m=+1179.176700556" Jan 30 12:15:43 crc kubenswrapper[4703]: I0130 12:15:43.821783 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-79955696d6-z2td9" Jan 30 12:15:44 crc kubenswrapper[4703]: I0130 12:15:44.381696 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-79c984db64-qk88l" event={"ID":"7f8888ec-800b-41b1-a297-576a28957668","Type":"ContainerStarted","Data":"535fec8f793643cade3a2cfb2975b1137a233b60418a6531e9e40eff2b46e783"} Jan 30 12:15:44 crc kubenswrapper[4703]: I0130 12:15:44.382021 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-79c984db64-qk88l" Jan 30 12:15:44 crc kubenswrapper[4703]: I0130 12:15:44.384989 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-vb4kb" event={"ID":"a634d819-9927-4b82-8ec1-959ca5f19908","Type":"ContainerStarted","Data":"cc14d60546f09cbd7287d100098050b11c749e4b807c024ab27bb8ab1552db13"} Jan 30 12:15:44 crc kubenswrapper[4703]: I0130 12:15:44.385428 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-vb4kb" Jan 30 12:15:44 crc kubenswrapper[4703]: I0130 12:15:44.403058 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-79c984db64-qk88l" podStartSLOduration=4.7979485870000005 podStartE2EDuration="46.403039593s" podCreationTimestamp="2026-01-30 12:14:58 +0000 UTC" firstStartedPulling="2026-01-30 12:15:01.560179459 +0000 UTC m=+1137.338001113" lastFinishedPulling="2026-01-30 12:15:43.165270465 +0000 UTC m=+1178.943092119" observedRunningTime="2026-01-30 12:15:44.398756038 +0000 UTC m=+1180.176577702" watchObservedRunningTime="2026-01-30 12:15:44.403039593 +0000 UTC m=+1180.180861247" Jan 30 12:15:44 crc kubenswrapper[4703]: I0130 12:15:44.417153 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-vb4kb" podStartSLOduration=4.108385135 podStartE2EDuration="47.417135542s" podCreationTimestamp="2026-01-30 12:14:57 +0000 UTC" firstStartedPulling="2026-01-30 12:15:00.372862723 +0000 UTC m=+1136.150684377" lastFinishedPulling="2026-01-30 12:15:43.68161311 +0000 UTC m=+1179.459434784" observedRunningTime="2026-01-30 
12:15:44.417100331 +0000 UTC m=+1180.194921985" watchObservedRunningTime="2026-01-30 12:15:44.417135542 +0000 UTC m=+1180.194957196" Jan 30 12:15:47 crc kubenswrapper[4703]: I0130 12:15:47.694621 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b6c4d8c5f-2l6qz" Jan 30 12:15:48 crc kubenswrapper[4703]: I0130 12:15:47.855478 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-6d9697b7f4-wxxzb" Jan 30 12:15:48 crc kubenswrapper[4703]: I0130 12:15:47.885963 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-69d6db494d-fzxq8" Jan 30 12:15:48 crc kubenswrapper[4703]: I0130 12:15:48.286896 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-5f4b8bd54d-p245s" Jan 30 12:15:48 crc kubenswrapper[4703]: I0130 12:15:48.320655 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-84f48565d4-xv5lq" Jan 30 12:15:48 crc kubenswrapper[4703]: I0130 12:15:48.626340 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-585dbc889-w9kvh" Jan 30 12:15:48 crc kubenswrapper[4703]: I0130 12:15:48.996959 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-qzmpv" Jan 30 12:15:49 crc kubenswrapper[4703]: I0130 12:15:49.084490 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-hw7pr" Jan 30 12:15:49 crc kubenswrapper[4703]: I0130 12:15:49.156026 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-x2z8q" Jan 30 12:15:49 crc kubenswrapper[4703]: I0130 12:15:49.512066 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-79c984db64-qk88l" Jan 30 12:15:50 crc kubenswrapper[4703]: I0130 12:15:50.457542 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2" Jan 30 12:15:57 crc kubenswrapper[4703]: I0130 12:15:57.812643 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-8886f4c47-vb4kb" Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 12:16:17.697568 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7lrrn"] Jan 30 12:16:17 crc kubenswrapper[4703]: E0130 12:16:17.698853 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b063a5d7-e03b-46db-85b0-75adab6bca8f" containerName="collect-profiles" Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 12:16:17.698888 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="b063a5d7-e03b-46db-85b0-75adab6bca8f" containerName="collect-profiles" Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 12:16:17.699057 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="b063a5d7-e03b-46db-85b0-75adab6bca8f" containerName="collect-profiles" Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 
12:16:17.700324 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-7lrrn" Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 12:16:17.704252 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 12:16:17.704283 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-4jksx" Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 12:16:17.704281 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 12:16:17.704363 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 12:16:17.714455 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hcjd\" (UniqueName: \"kubernetes.io/projected/df2cb2b8-20fb-4df2-bf4d-499d73faa55d-kube-api-access-2hcjd\") pod \"dnsmasq-dns-675f4bcbfc-7lrrn\" (UID: \"df2cb2b8-20fb-4df2-bf4d-499d73faa55d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7lrrn" Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 12:16:17.714554 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df2cb2b8-20fb-4df2-bf4d-499d73faa55d-config\") pod \"dnsmasq-dns-675f4bcbfc-7lrrn\" (UID: \"df2cb2b8-20fb-4df2-bf4d-499d73faa55d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7lrrn" Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 12:16:17.792094 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7lrrn"] Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 12:16:17.815197 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df2cb2b8-20fb-4df2-bf4d-499d73faa55d-config\") pod \"dnsmasq-dns-675f4bcbfc-7lrrn\" (UID: \"df2cb2b8-20fb-4df2-bf4d-499d73faa55d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7lrrn" Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 12:16:17.815294 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hcjd\" (UniqueName: \"kubernetes.io/projected/df2cb2b8-20fb-4df2-bf4d-499d73faa55d-kube-api-access-2hcjd\") pod \"dnsmasq-dns-675f4bcbfc-7lrrn\" (UID: \"df2cb2b8-20fb-4df2-bf4d-499d73faa55d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7lrrn" Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 12:16:17.816604 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df2cb2b8-20fb-4df2-bf4d-499d73faa55d-config\") pod \"dnsmasq-dns-675f4bcbfc-7lrrn\" (UID: \"df2cb2b8-20fb-4df2-bf4d-499d73faa55d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7lrrn" Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 12:16:17.844478 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hcjd\" (UniqueName: \"kubernetes.io/projected/df2cb2b8-20fb-4df2-bf4d-499d73faa55d-kube-api-access-2hcjd\") pod \"dnsmasq-dns-675f4bcbfc-7lrrn\" (UID: \"df2cb2b8-20fb-4df2-bf4d-499d73faa55d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-7lrrn" Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 12:16:17.939974 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-w2bq4"] Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 12:16:17.941379 4703 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-w2bq4" Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 12:16:17.945258 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Jan 30 12:16:17 crc kubenswrapper[4703]: I0130 12:16:17.975435 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-w2bq4"] Jan 30 12:16:18 crc kubenswrapper[4703]: I0130 12:16:18.021105 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-7lrrn" Jan 30 12:16:18 crc kubenswrapper[4703]: I0130 12:16:18.121513 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2rb8\" (UniqueName: \"kubernetes.io/projected/cd258110-c376-4e35-a99c-8ed43e5d7631-kube-api-access-d2rb8\") pod \"dnsmasq-dns-78dd6ddcc-w2bq4\" (UID: \"cd258110-c376-4e35-a99c-8ed43e5d7631\") " pod="openstack/dnsmasq-dns-78dd6ddcc-w2bq4" Jan 30 12:16:18 crc kubenswrapper[4703]: I0130 12:16:18.121645 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd258110-c376-4e35-a99c-8ed43e5d7631-config\") pod \"dnsmasq-dns-78dd6ddcc-w2bq4\" (UID: \"cd258110-c376-4e35-a99c-8ed43e5d7631\") " pod="openstack/dnsmasq-dns-78dd6ddcc-w2bq4" Jan 30 12:16:18 crc kubenswrapper[4703]: I0130 12:16:18.121709 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cd258110-c376-4e35-a99c-8ed43e5d7631-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-w2bq4\" (UID: \"cd258110-c376-4e35-a99c-8ed43e5d7631\") " pod="openstack/dnsmasq-dns-78dd6ddcc-w2bq4" Jan 30 12:16:18 crc kubenswrapper[4703]: I0130 12:16:18.223874 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2rb8\" (UniqueName: \"kubernetes.io/projected/cd258110-c376-4e35-a99c-8ed43e5d7631-kube-api-access-d2rb8\") pod \"dnsmasq-dns-78dd6ddcc-w2bq4\" (UID: \"cd258110-c376-4e35-a99c-8ed43e5d7631\") " pod="openstack/dnsmasq-dns-78dd6ddcc-w2bq4" Jan 30 12:16:18 crc kubenswrapper[4703]: I0130 12:16:18.223963 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd258110-c376-4e35-a99c-8ed43e5d7631-config\") pod \"dnsmasq-dns-78dd6ddcc-w2bq4\" (UID: \"cd258110-c376-4e35-a99c-8ed43e5d7631\") " pod="openstack/dnsmasq-dns-78dd6ddcc-w2bq4" Jan 30 12:16:18 crc kubenswrapper[4703]: I0130 12:16:18.224020 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cd258110-c376-4e35-a99c-8ed43e5d7631-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-w2bq4\" (UID: \"cd258110-c376-4e35-a99c-8ed43e5d7631\") " pod="openstack/dnsmasq-dns-78dd6ddcc-w2bq4" Jan 30 12:16:18 crc kubenswrapper[4703]: I0130 12:16:18.225895 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd258110-c376-4e35-a99c-8ed43e5d7631-config\") pod \"dnsmasq-dns-78dd6ddcc-w2bq4\" (UID: \"cd258110-c376-4e35-a99c-8ed43e5d7631\") " pod="openstack/dnsmasq-dns-78dd6ddcc-w2bq4" Jan 30 12:16:18 crc kubenswrapper[4703]: I0130 12:16:18.225922 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cd258110-c376-4e35-a99c-8ed43e5d7631-dns-svc\") pod 
\"dnsmasq-dns-78dd6ddcc-w2bq4\" (UID: \"cd258110-c376-4e35-a99c-8ed43e5d7631\") " pod="openstack/dnsmasq-dns-78dd6ddcc-w2bq4" Jan 30 12:16:18 crc kubenswrapper[4703]: I0130 12:16:18.256261 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2rb8\" (UniqueName: \"kubernetes.io/projected/cd258110-c376-4e35-a99c-8ed43e5d7631-kube-api-access-d2rb8\") pod \"dnsmasq-dns-78dd6ddcc-w2bq4\" (UID: \"cd258110-c376-4e35-a99c-8ed43e5d7631\") " pod="openstack/dnsmasq-dns-78dd6ddcc-w2bq4" Jan 30 12:16:18 crc kubenswrapper[4703]: I0130 12:16:18.266325 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-w2bq4" Jan 30 12:16:18 crc kubenswrapper[4703]: I0130 12:16:18.908492 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7lrrn"] Jan 30 12:16:19 crc kubenswrapper[4703]: I0130 12:16:19.011684 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-w2bq4"] Jan 30 12:16:19 crc kubenswrapper[4703]: I0130 12:16:19.869162 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-w2bq4" event={"ID":"cd258110-c376-4e35-a99c-8ed43e5d7631","Type":"ContainerStarted","Data":"4843004a828eda8ff56e5a2861b9c6a266ef451f774fef0c65167995c8922ed2"} Jan 30 12:16:19 crc kubenswrapper[4703]: I0130 12:16:19.871161 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-7lrrn" event={"ID":"df2cb2b8-20fb-4df2-bf4d-499d73faa55d","Type":"ContainerStarted","Data":"c54568367a62580080e9edfbb2834644ae5a936a58300d074d1930bb3716e131"} Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.301803 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7lrrn"] Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.338105 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-77d6s"] Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.345415 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-77d6s" Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.367171 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-77d6s"] Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.475489 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jllgf\" (UniqueName: \"kubernetes.io/projected/dfd6f4da-4223-4439-83d7-3204e7ad5803-kube-api-access-jllgf\") pod \"dnsmasq-dns-5ccc8479f9-77d6s\" (UID: \"dfd6f4da-4223-4439-83d7-3204e7ad5803\") " pod="openstack/dnsmasq-dns-5ccc8479f9-77d6s" Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.475613 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfd6f4da-4223-4439-83d7-3204e7ad5803-config\") pod \"dnsmasq-dns-5ccc8479f9-77d6s\" (UID: \"dfd6f4da-4223-4439-83d7-3204e7ad5803\") " pod="openstack/dnsmasq-dns-5ccc8479f9-77d6s" Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.475753 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dfd6f4da-4223-4439-83d7-3204e7ad5803-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-77d6s\" (UID: \"dfd6f4da-4223-4439-83d7-3204e7ad5803\") " pod="openstack/dnsmasq-dns-5ccc8479f9-77d6s" Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.577756 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dfd6f4da-4223-4439-83d7-3204e7ad5803-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-77d6s\" (UID: \"dfd6f4da-4223-4439-83d7-3204e7ad5803\") " pod="openstack/dnsmasq-dns-5ccc8479f9-77d6s" Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.577909 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jllgf\" (UniqueName: \"kubernetes.io/projected/dfd6f4da-4223-4439-83d7-3204e7ad5803-kube-api-access-jllgf\") pod \"dnsmasq-dns-5ccc8479f9-77d6s\" (UID: \"dfd6f4da-4223-4439-83d7-3204e7ad5803\") " pod="openstack/dnsmasq-dns-5ccc8479f9-77d6s" Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.577946 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfd6f4da-4223-4439-83d7-3204e7ad5803-config\") pod \"dnsmasq-dns-5ccc8479f9-77d6s\" (UID: \"dfd6f4da-4223-4439-83d7-3204e7ad5803\") " pod="openstack/dnsmasq-dns-5ccc8479f9-77d6s" Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.578994 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfd6f4da-4223-4439-83d7-3204e7ad5803-config\") pod \"dnsmasq-dns-5ccc8479f9-77d6s\" (UID: \"dfd6f4da-4223-4439-83d7-3204e7ad5803\") " pod="openstack/dnsmasq-dns-5ccc8479f9-77d6s" Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.579630 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dfd6f4da-4223-4439-83d7-3204e7ad5803-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-77d6s\" (UID: \"dfd6f4da-4223-4439-83d7-3204e7ad5803\") " pod="openstack/dnsmasq-dns-5ccc8479f9-77d6s" Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.627286 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jllgf\" (UniqueName: 
\"kubernetes.io/projected/dfd6f4da-4223-4439-83d7-3204e7ad5803-kube-api-access-jllgf\") pod \"dnsmasq-dns-5ccc8479f9-77d6s\" (UID: \"dfd6f4da-4223-4439-83d7-3204e7ad5803\") " pod="openstack/dnsmasq-dns-5ccc8479f9-77d6s" Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.676926 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-77d6s" Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.785030 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-w2bq4"] Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.826017 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-qmg2h"] Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.857573 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-qmg2h"] Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.858043 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-qmg2h" Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.987954 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1f2a65f3-c251-40ad-9900-2b96f6298f79-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-qmg2h\" (UID: \"1f2a65f3-c251-40ad-9900-2b96f6298f79\") " pod="openstack/dnsmasq-dns-57d769cc4f-qmg2h" Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.988469 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f2a65f3-c251-40ad-9900-2b96f6298f79-config\") pod \"dnsmasq-dns-57d769cc4f-qmg2h\" (UID: \"1f2a65f3-c251-40ad-9900-2b96f6298f79\") " pod="openstack/dnsmasq-dns-57d769cc4f-qmg2h" Jan 30 12:16:20 crc kubenswrapper[4703]: I0130 12:16:20.988560 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nslw6\" (UniqueName: \"kubernetes.io/projected/1f2a65f3-c251-40ad-9900-2b96f6298f79-kube-api-access-nslw6\") pod \"dnsmasq-dns-57d769cc4f-qmg2h\" (UID: \"1f2a65f3-c251-40ad-9900-2b96f6298f79\") " pod="openstack/dnsmasq-dns-57d769cc4f-qmg2h" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.089870 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1f2a65f3-c251-40ad-9900-2b96f6298f79-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-qmg2h\" (UID: \"1f2a65f3-c251-40ad-9900-2b96f6298f79\") " pod="openstack/dnsmasq-dns-57d769cc4f-qmg2h" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.089914 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f2a65f3-c251-40ad-9900-2b96f6298f79-config\") pod \"dnsmasq-dns-57d769cc4f-qmg2h\" (UID: \"1f2a65f3-c251-40ad-9900-2b96f6298f79\") " pod="openstack/dnsmasq-dns-57d769cc4f-qmg2h" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.089980 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nslw6\" (UniqueName: \"kubernetes.io/projected/1f2a65f3-c251-40ad-9900-2b96f6298f79-kube-api-access-nslw6\") pod \"dnsmasq-dns-57d769cc4f-qmg2h\" (UID: \"1f2a65f3-c251-40ad-9900-2b96f6298f79\") " pod="openstack/dnsmasq-dns-57d769cc4f-qmg2h" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.091610 4703 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1f2a65f3-c251-40ad-9900-2b96f6298f79-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-qmg2h\" (UID: \"1f2a65f3-c251-40ad-9900-2b96f6298f79\") " pod="openstack/dnsmasq-dns-57d769cc4f-qmg2h" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.092218 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f2a65f3-c251-40ad-9900-2b96f6298f79-config\") pod \"dnsmasq-dns-57d769cc4f-qmg2h\" (UID: \"1f2a65f3-c251-40ad-9900-2b96f6298f79\") " pod="openstack/dnsmasq-dns-57d769cc4f-qmg2h" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.121226 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nslw6\" (UniqueName: \"kubernetes.io/projected/1f2a65f3-c251-40ad-9900-2b96f6298f79-kube-api-access-nslw6\") pod \"dnsmasq-dns-57d769cc4f-qmg2h\" (UID: \"1f2a65f3-c251-40ad-9900-2b96f6298f79\") " pod="openstack/dnsmasq-dns-57d769cc4f-qmg2h" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.204503 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-qmg2h" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.427200 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-77d6s"] Jan 30 12:16:21 crc kubenswrapper[4703]: W0130 12:16:21.447026 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddfd6f4da_4223_4439_83d7_3204e7ad5803.slice/crio-045d96fc8724f288e4fcdd26edb5410b4b0e0232111db1eae3599a15d675f1a2 WatchSource:0}: Error finding container 045d96fc8724f288e4fcdd26edb5410b4b0e0232111db1eae3599a15d675f1a2: Status 404 returned error can't find the container with id 045d96fc8724f288e4fcdd26edb5410b4b0e0232111db1eae3599a15d675f1a2 Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.552133 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.560972 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.565882 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-hlnmk" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.566168 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.566351 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.566516 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.566659 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.566939 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.567146 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.599483 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.702294 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.702786 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.702835 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.702973 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.703001 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.703030 4703 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.703377 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.703540 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.703617 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.703636 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.703657 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n94qd\" (UniqueName: \"kubernetes.io/projected/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-kube-api-access-n94qd\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.806321 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.806401 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.806432 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.806463 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.806523 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.806561 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.806594 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.806620 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.806643 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n94qd\" (UniqueName: \"kubernetes.io/projected/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-kube-api-access-n94qd\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.806712 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.806743 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.808717 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.821838 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.822261 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.823659 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.824593 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.825436 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.825961 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.831452 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.834313 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.844962 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.879951 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n94qd\" (UniqueName: \"kubernetes.io/projected/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-kube-api-access-n94qd\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.909496 4703 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.943268 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.953241 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-77d6s" event={"ID":"dfd6f4da-4223-4439-83d7-3204e7ad5803","Type":"ContainerStarted","Data":"045d96fc8724f288e4fcdd26edb5410b4b0e0232111db1eae3599a15d675f1a2"} Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.953411 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.960532 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.960861 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-tpbrp" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.964246 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.964641 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.964807 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.964937 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.965045 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 30 12:16:21 crc kubenswrapper[4703]: I0130 12:16:21.965590 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.024350 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.024411 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.024441 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-pod-info\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.024460 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" 
(UniqueName: \"kubernetes.io/projected/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.024516 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-server-conf\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.024556 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.024637 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-config-data\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.024795 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.024841 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.024879 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.024917 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbd8j\" (UniqueName: \"kubernetes.io/projected/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-kube-api-access-gbd8j\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.088375 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-qmg2h"] Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.126568 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-server-conf\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.126635 4703 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.126657 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-config-data\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.126717 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.126745 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.126774 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.126801 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbd8j\" (UniqueName: \"kubernetes.io/projected/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-kube-api-access-gbd8j\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.126842 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.126873 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.126908 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-pod-info\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.126938 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " 
pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.128906 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.129854 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.130294 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-config-data\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.130782 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.131021 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.136647 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-pod-info\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.137600 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-server-conf\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.138140 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.138265 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.139186 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-confd\") pod 
\"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.149716 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbd8j\" (UniqueName: \"kubernetes.io/projected/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-kube-api-access-gbd8j\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.175551 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " pod="openstack/rabbitmq-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.214190 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:16:22 crc kubenswrapper[4703]: I0130 12:16:22.298457 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.000886 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-qmg2h" event={"ID":"1f2a65f3-c251-40ad-9900-2b96f6298f79","Type":"ContainerStarted","Data":"8ee0b934528a7bcdb6d69b57d6272984ce2fa6ee882e2ffc1e69acda45b5a8a1"} Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.251803 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.257539 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.285872 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.293713 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-vl7kf" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.293913 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.294241 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.294415 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.305020 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.346131 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.380490 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.450002 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.450469 4703 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f0ae0323-f870-408b-b688-df1b4e3e8da6-config-data-generated\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.450542 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0ae0323-f870-408b-b688-df1b4e3e8da6-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.450590 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0ae0323-f870-408b-b688-df1b4e3e8da6-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.450622 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f0ae0323-f870-408b-b688-df1b4e3e8da6-config-data-default\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.450683 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f0ae0323-f870-408b-b688-df1b4e3e8da6-kolla-config\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.450710 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqgkl\" (UniqueName: \"kubernetes.io/projected/f0ae0323-f870-408b-b688-df1b4e3e8da6-kube-api-access-qqgkl\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.450755 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f0ae0323-f870-408b-b688-df1b4e3e8da6-operator-scripts\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.554613 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.554685 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f0ae0323-f870-408b-b688-df1b4e3e8da6-config-data-generated\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.554723 4703 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0ae0323-f870-408b-b688-df1b4e3e8da6-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.554751 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0ae0323-f870-408b-b688-df1b4e3e8da6-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.554780 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f0ae0323-f870-408b-b688-df1b4e3e8da6-config-data-default\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.554850 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f0ae0323-f870-408b-b688-df1b4e3e8da6-kolla-config\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.554894 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqgkl\" (UniqueName: \"kubernetes.io/projected/f0ae0323-f870-408b-b688-df1b4e3e8da6-kube-api-access-qqgkl\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.554926 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f0ae0323-f870-408b-b688-df1b4e3e8da6-operator-scripts\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.555788 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.556696 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f0ae0323-f870-408b-b688-df1b4e3e8da6-operator-scripts\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.557446 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f0ae0323-f870-408b-b688-df1b4e3e8da6-config-data-default\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.558784 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f0ae0323-f870-408b-b688-df1b4e3e8da6-kolla-config\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " 
pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.555846 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f0ae0323-f870-408b-b688-df1b4e3e8da6-config-data-generated\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.596063 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0ae0323-f870-408b-b688-df1b4e3e8da6-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.596437 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0ae0323-f870-408b-b688-df1b4e3e8da6-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.599740 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqgkl\" (UniqueName: \"kubernetes.io/projected/f0ae0323-f870-408b-b688-df1b4e3e8da6-kube-api-access-qqgkl\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.632090 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"openstack-galera-0\" (UID: \"f0ae0323-f870-408b-b688-df1b4e3e8da6\") " pod="openstack/openstack-galera-0" Jan 30 12:16:23 crc kubenswrapper[4703]: I0130 12:16:23.926941 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.369620 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5","Type":"ContainerStarted","Data":"3da7eae58bf75f8f1addba1b20a1635775b798409eb0a38b061073326c19ab4c"} Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.389415 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b","Type":"ContainerStarted","Data":"204287faf16f8f53f34353c63cc1f9ce62cf869e369fb2750e9284e0786692b2"} Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.628391 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.633822 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.640152 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-4b5dd" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.652452 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.652881 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.656058 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.660324 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.775507 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.775595 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.775638 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.775680 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.775766 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5zj5\" (UniqueName: \"kubernetes.io/projected/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-kube-api-access-s5zj5\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.775806 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.775870 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.775896 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.878452 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.878577 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5zj5\" (UniqueName: \"kubernetes.io/projected/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-kube-api-access-s5zj5\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.878624 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.878688 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.878716 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.878767 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.878816 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.878861 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-operator-scripts\") pod 
\"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.881008 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.881338 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.885768 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.890905 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.894612 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.899007 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.927970 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:24 crc kubenswrapper[4703]: I0130 12:16:24.937544 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5zj5\" (UniqueName: \"kubernetes.io/projected/e5c696fa-999f-48c2-bf1a-e015ec7e7ab1-kube-api-access-s5zj5\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.003552 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1\") " pod="openstack/openstack-cell1-galera-0" Jan 30 12:16:25 crc 
kubenswrapper[4703]: I0130 12:16:25.163173 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.164618 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.171660 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-rjz48" Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.171898 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.172072 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.239641 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.301755 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5650e0f-dfd5-4fc5-a728-b1eab91a0d23-memcached-tls-certs\") pod \"memcached-0\" (UID: \"a5650e0f-dfd5-4fc5-a728-b1eab91a0d23\") " pod="openstack/memcached-0" Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.302189 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5650e0f-dfd5-4fc5-a728-b1eab91a0d23-combined-ca-bundle\") pod \"memcached-0\" (UID: \"a5650e0f-dfd5-4fc5-a728-b1eab91a0d23\") " pod="openstack/memcached-0" Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.302247 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzkw9\" (UniqueName: \"kubernetes.io/projected/a5650e0f-dfd5-4fc5-a728-b1eab91a0d23-kube-api-access-wzkw9\") pod \"memcached-0\" (UID: \"a5650e0f-dfd5-4fc5-a728-b1eab91a0d23\") " pod="openstack/memcached-0" Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.302353 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a5650e0f-dfd5-4fc5-a728-b1eab91a0d23-kolla-config\") pod \"memcached-0\" (UID: \"a5650e0f-dfd5-4fc5-a728-b1eab91a0d23\") " pod="openstack/memcached-0" Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.302396 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a5650e0f-dfd5-4fc5-a728-b1eab91a0d23-config-data\") pod \"memcached-0\" (UID: \"a5650e0f-dfd5-4fc5-a728-b1eab91a0d23\") " pod="openstack/memcached-0" Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.306424 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.403863 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5650e0f-dfd5-4fc5-a728-b1eab91a0d23-memcached-tls-certs\") pod \"memcached-0\" (UID: \"a5650e0f-dfd5-4fc5-a728-b1eab91a0d23\") " pod="openstack/memcached-0"
Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.405352 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5650e0f-dfd5-4fc5-a728-b1eab91a0d23-combined-ca-bundle\") pod \"memcached-0\" (UID: \"a5650e0f-dfd5-4fc5-a728-b1eab91a0d23\") " pod="openstack/memcached-0"
Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.405439 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzkw9\" (UniqueName: \"kubernetes.io/projected/a5650e0f-dfd5-4fc5-a728-b1eab91a0d23-kube-api-access-wzkw9\") pod \"memcached-0\" (UID: \"a5650e0f-dfd5-4fc5-a728-b1eab91a0d23\") " pod="openstack/memcached-0"
Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.405606 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a5650e0f-dfd5-4fc5-a728-b1eab91a0d23-kolla-config\") pod \"memcached-0\" (UID: \"a5650e0f-dfd5-4fc5-a728-b1eab91a0d23\") " pod="openstack/memcached-0"
Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.405645 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a5650e0f-dfd5-4fc5-a728-b1eab91a0d23-config-data\") pod \"memcached-0\" (UID: \"a5650e0f-dfd5-4fc5-a728-b1eab91a0d23\") " pod="openstack/memcached-0"
Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.406831 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a5650e0f-dfd5-4fc5-a728-b1eab91a0d23-config-data\") pod \"memcached-0\" (UID: \"a5650e0f-dfd5-4fc5-a728-b1eab91a0d23\") " pod="openstack/memcached-0"
Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.406889 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/a5650e0f-dfd5-4fc5-a728-b1eab91a0d23-kolla-config\") pod \"memcached-0\" (UID: \"a5650e0f-dfd5-4fc5-a728-b1eab91a0d23\") " pod="openstack/memcached-0"
Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.415722 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5650e0f-dfd5-4fc5-a728-b1eab91a0d23-memcached-tls-certs\") pod \"memcached-0\" (UID: \"a5650e0f-dfd5-4fc5-a728-b1eab91a0d23\") " pod="openstack/memcached-0"
Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.416271 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5650e0f-dfd5-4fc5-a728-b1eab91a0d23-combined-ca-bundle\") pod \"memcached-0\" (UID: \"a5650e0f-dfd5-4fc5-a728-b1eab91a0d23\") " pod="openstack/memcached-0"
Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.453752 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzkw9\" (UniqueName: \"kubernetes.io/projected/a5650e0f-dfd5-4fc5-a728-b1eab91a0d23-kube-api-access-wzkw9\") pod \"memcached-0\" (UID: \"a5650e0f-dfd5-4fc5-a728-b1eab91a0d23\") " pod="openstack/memcached-0"
Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.469718 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Jan 30 12:16:25 crc kubenswrapper[4703]: I0130 12:16:25.661666 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Jan 30 12:16:26 crc kubenswrapper[4703]: I0130 12:16:26.203268 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Jan 30 12:16:26 crc kubenswrapper[4703]: I0130 12:16:26.491588 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1","Type":"ContainerStarted","Data":"994a72e7b5b785149a64188a99d5fedd77c63b8d551d02fbe5a13f08a915bc2c"}
Jan 30 12:16:26 crc kubenswrapper[4703]: I0130 12:16:26.499453 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f0ae0323-f870-408b-b688-df1b4e3e8da6","Type":"ContainerStarted","Data":"de1b8074b0239cac4e2d41dc2d3c956dd9f772be8fc756a6bc5ec7a96ba43412"}
Jan 30 12:16:26 crc kubenswrapper[4703]: I0130 12:16:26.891480 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Jan 30 12:16:26 crc kubenswrapper[4703]: W0130 12:16:26.917455 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda5650e0f_dfd5_4fc5_a728_b1eab91a0d23.slice/crio-eb599f555739ecf4fe72a9e5cef7fa9e4fbecd8f91f6fb15e68a04bf616cd151 WatchSource:0}: Error finding container eb599f555739ecf4fe72a9e5cef7fa9e4fbecd8f91f6fb15e68a04bf616cd151: Status 404 returned error can't find the container with id eb599f555739ecf4fe72a9e5cef7fa9e4fbecd8f91f6fb15e68a04bf616cd151
Jan 30 12:16:27 crc kubenswrapper[4703]: I0130 12:16:27.164147 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 30 12:16:27 crc kubenswrapper[4703]: I0130 12:16:27.165475 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 30 12:16:27 crc kubenswrapper[4703]: I0130 12:16:27.165606 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Jan 30 12:16:27 crc kubenswrapper[4703]: I0130 12:16:27.178624 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-g6sz7"
Jan 30 12:16:27 crc kubenswrapper[4703]: I0130 12:16:27.263772 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cbff\" (UniqueName: \"kubernetes.io/projected/7847f607-512e-440a-af08-8fc3104621b6-kube-api-access-2cbff\") pod \"kube-state-metrics-0\" (UID: \"7847f607-512e-440a-af08-8fc3104621b6\") " pod="openstack/kube-state-metrics-0"
Jan 30 12:16:27 crc kubenswrapper[4703]: I0130 12:16:27.374553 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cbff\" (UniqueName: \"kubernetes.io/projected/7847f607-512e-440a-af08-8fc3104621b6-kube-api-access-2cbff\") pod \"kube-state-metrics-0\" (UID: \"7847f607-512e-440a-af08-8fc3104621b6\") " pod="openstack/kube-state-metrics-0"
Jan 30 12:16:27 crc kubenswrapper[4703]: I0130 12:16:27.466609 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cbff\" (UniqueName: \"kubernetes.io/projected/7847f607-512e-440a-af08-8fc3104621b6-kube-api-access-2cbff\") pod \"kube-state-metrics-0\" (UID: \"7847f607-512e-440a-af08-8fc3104621b6\") " pod="openstack/kube-state-metrics-0"
Jan 30 12:16:27 crc kubenswrapper[4703]: I0130 12:16:27.514890 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Jan 30 12:16:27 crc kubenswrapper[4703]: I0130 12:16:27.532328 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"a5650e0f-dfd5-4fc5-a728-b1eab91a0d23","Type":"ContainerStarted","Data":"eb599f555739ecf4fe72a9e5cef7fa9e4fbecd8f91f6fb15e68a04bf616cd151"}
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.610443 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.616531 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.619603 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.619822 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.621008 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.621180 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.621874 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.622472 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.622759 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-f82pr"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.622930 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.645410 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.719581 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8e865e8b-7723-4aed-b51c-ce7a8da59d13-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.719782 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f44c6b57-502f-456c-b62d-7562ab4250af\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.719878 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/8e865e8b-7723-4aed-b51c-ce7a8da59d13-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.720000 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8e865e8b-7723-4aed-b51c-ce7a8da59d13-config\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.720033 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/8e865e8b-7723-4aed-b51c-ce7a8da59d13-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.720764 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8e865e8b-7723-4aed-b51c-ce7a8da59d13-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.720907 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8e865e8b-7723-4aed-b51c-ce7a8da59d13-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.720972 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/8e865e8b-7723-4aed-b51c-ce7a8da59d13-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.721221 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/8e865e8b-7723-4aed-b51c-ce7a8da59d13-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.721260 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spssg\" (UniqueName: \"kubernetes.io/projected/8e865e8b-7723-4aed-b51c-ce7a8da59d13-kube-api-access-spssg\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.731615 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.824532 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8e865e8b-7723-4aed-b51c-ce7a8da59d13-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.824757 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8e865e8b-7723-4aed-b51c-ce7a8da59d13-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.824814 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/8e865e8b-7723-4aed-b51c-ce7a8da59d13-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.824872 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spssg\" (UniqueName: \"kubernetes.io/projected/8e865e8b-7723-4aed-b51c-ce7a8da59d13-kube-api-access-spssg\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.824954 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/8e865e8b-7723-4aed-b51c-ce7a8da59d13-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.825085 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8e865e8b-7723-4aed-b51c-ce7a8da59d13-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.825263 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f44c6b57-502f-456c-b62d-7562ab4250af\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.825339 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/8e865e8b-7723-4aed-b51c-ce7a8da59d13-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.825422 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8e865e8b-7723-4aed-b51c-ce7a8da59d13-config\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.825454 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/8e865e8b-7723-4aed-b51c-ce7a8da59d13-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.827273 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/8e865e8b-7723-4aed-b51c-ce7a8da59d13-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.827598 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/8e865e8b-7723-4aed-b51c-ce7a8da59d13-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.830593 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/8e865e8b-7723-4aed-b51c-ce7a8da59d13-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.836848 4703 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.836902 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f44c6b57-502f-456c-b62d-7562ab4250af\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e923a3fbd1684cd164e99ff35e919b68d893d89a026a0f736ef548b3af68c494/globalmount\"" pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.839697 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8e865e8b-7723-4aed-b51c-ce7a8da59d13-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.842395 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8e865e8b-7723-4aed-b51c-ce7a8da59d13-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.861311 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8e865e8b-7723-4aed-b51c-ce7a8da59d13-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.873244 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/8e865e8b-7723-4aed-b51c-ce7a8da59d13-config\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.901200 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spssg\" (UniqueName: \"kubernetes.io/projected/8e865e8b-7723-4aed-b51c-ce7a8da59d13-kube-api-access-spssg\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.902313 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/8e865e8b-7723-4aed-b51c-ce7a8da59d13-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.960059 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f44c6b57-502f-456c-b62d-7562ab4250af\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af\") pod \"prometheus-metric-storage-0\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:28 crc kubenswrapper[4703]: I0130 12:16:28.977448 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 30 12:16:29 crc kubenswrapper[4703]: I0130 12:16:29.650114 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"7847f607-512e-440a-af08-8fc3104621b6","Type":"ContainerStarted","Data":"5b453f412f4d9bec9b19ade585ee14310446e6d5fca143a0b323c638dcd53e9c"}
Jan 30 12:16:29 crc kubenswrapper[4703]: I0130 12:16:29.926765 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 30 12:16:29 crc kubenswrapper[4703]: W0130 12:16:29.977992 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e865e8b_7723_4aed_b51c_ce7a8da59d13.slice/crio-499823e6026b73c90021591cf3b6c9354fc21c44ed9e857cb318f9b4045b8f62 WatchSource:0}: Error finding container 499823e6026b73c90021591cf3b6c9354fc21c44ed9e857cb318f9b4045b8f62: Status 404 returned error can't find the container with id 499823e6026b73c90021591cf3b6c9354fc21c44ed9e857cb318f9b4045b8f62
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.200977 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-rjbtf"]
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.203594 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.217963 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-7v2lg"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.218526 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.218949 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.272278 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-rjbtf"]
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.274799 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd146d96-e737-48a6-a3e4-d414913da90f-ovn-controller-tls-certs\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.274862 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fd146d96-e737-48a6-a3e4-d414913da90f-scripts\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.274907 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fd146d96-e737-48a6-a3e4-d414913da90f-var-run\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.274945 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd146d96-e737-48a6-a3e4-d414913da90f-combined-ca-bundle\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.274986 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-286j4\" (UniqueName: \"kubernetes.io/projected/fd146d96-e737-48a6-a3e4-d414913da90f-kube-api-access-286j4\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.275018 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fd146d96-e737-48a6-a3e4-d414913da90f-var-run-ovn\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.275049 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fd146d96-e737-48a6-a3e4-d414913da90f-var-log-ovn\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.331168 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-tg9tq"]
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.333919 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.342711 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-tg9tq"]
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.376979 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd146d96-e737-48a6-a3e4-d414913da90f-combined-ca-bundle\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.377039 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-286j4\" (UniqueName: \"kubernetes.io/projected/fd146d96-e737-48a6-a3e4-d414913da90f-kube-api-access-286j4\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.377075 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fd146d96-e737-48a6-a3e4-d414913da90f-var-run-ovn\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.378499 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fd146d96-e737-48a6-a3e4-d414913da90f-var-run-ovn\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.379320 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fd146d96-e737-48a6-a3e4-d414913da90f-var-log-ovn\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.380109 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd146d96-e737-48a6-a3e4-d414913da90f-ovn-controller-tls-certs\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.380192 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fd146d96-e737-48a6-a3e4-d414913da90f-scripts\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.380245 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fd146d96-e737-48a6-a3e4-d414913da90f-var-run\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.380558 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fd146d96-e737-48a6-a3e4-d414913da90f-var-run\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.380840 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fd146d96-e737-48a6-a3e4-d414913da90f-var-log-ovn\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.387022 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd146d96-e737-48a6-a3e4-d414913da90f-combined-ca-bundle\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.393365 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/fd146d96-e737-48a6-a3e4-d414913da90f-ovn-controller-tls-certs\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.400170 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-286j4\" (UniqueName: \"kubernetes.io/projected/fd146d96-e737-48a6-a3e4-d414913da90f-kube-api-access-286j4\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.401808 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fd146d96-e737-48a6-a3e4-d414913da90f-scripts\") pod \"ovn-controller-rjbtf\" (UID: \"fd146d96-e737-48a6-a3e4-d414913da90f\") " pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.483982 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/20007227-8914-4ec9-ad56-1bf477408476-var-lib\") pod \"ovn-controller-ovs-tg9tq\" (UID: \"20007227-8914-4ec9-ad56-1bf477408476\") " pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.484062 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/20007227-8914-4ec9-ad56-1bf477408476-scripts\") pod \"ovn-controller-ovs-tg9tq\" (UID: \"20007227-8914-4ec9-ad56-1bf477408476\") " pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.484257 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/20007227-8914-4ec9-ad56-1bf477408476-var-log\") pod \"ovn-controller-ovs-tg9tq\" (UID: \"20007227-8914-4ec9-ad56-1bf477408476\") " pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.484318 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/20007227-8914-4ec9-ad56-1bf477408476-var-run\") pod \"ovn-controller-ovs-tg9tq\" (UID: \"20007227-8914-4ec9-ad56-1bf477408476\") " pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.484373 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/20007227-8914-4ec9-ad56-1bf477408476-etc-ovs\") pod \"ovn-controller-ovs-tg9tq\" (UID: \"20007227-8914-4ec9-ad56-1bf477408476\") " pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.484440 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rxgz\" (UniqueName: \"kubernetes.io/projected/20007227-8914-4ec9-ad56-1bf477408476-kube-api-access-5rxgz\") pod \"ovn-controller-ovs-tg9tq\" (UID: \"20007227-8914-4ec9-ad56-1bf477408476\") " pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.500908 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"]
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.506319 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.508979 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.509362 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.509707 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.509859 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-dlqlg"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.525602 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.540194 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.587360 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/20007227-8914-4ec9-ad56-1bf477408476-var-run\") pod \"ovn-controller-ovs-tg9tq\" (UID: \"20007227-8914-4ec9-ad56-1bf477408476\") " pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.587808 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/20007227-8914-4ec9-ad56-1bf477408476-etc-ovs\") pod \"ovn-controller-ovs-tg9tq\" (UID: \"20007227-8914-4ec9-ad56-1bf477408476\") " pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.588039 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rxgz\" (UniqueName: \"kubernetes.io/projected/20007227-8914-4ec9-ad56-1bf477408476-kube-api-access-5rxgz\") pod \"ovn-controller-ovs-tg9tq\" (UID: \"20007227-8914-4ec9-ad56-1bf477408476\") " pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.588221 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/20007227-8914-4ec9-ad56-1bf477408476-var-run\") pod \"ovn-controller-ovs-tg9tq\" (UID: \"20007227-8914-4ec9-ad56-1bf477408476\") " pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.588608 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/20007227-8914-4ec9-ad56-1bf477408476-etc-ovs\") pod \"ovn-controller-ovs-tg9tq\" (UID: \"20007227-8914-4ec9-ad56-1bf477408476\") " pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.588607 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/20007227-8914-4ec9-ad56-1bf477408476-var-lib\") pod \"ovn-controller-ovs-tg9tq\" (UID: \"20007227-8914-4ec9-ad56-1bf477408476\") " pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.588775 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/20007227-8914-4ec9-ad56-1bf477408476-scripts\") pod \"ovn-controller-ovs-tg9tq\" (UID: \"20007227-8914-4ec9-ad56-1bf477408476\") " pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.588935 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/20007227-8914-4ec9-ad56-1bf477408476-var-log\") pod \"ovn-controller-ovs-tg9tq\" (UID: \"20007227-8914-4ec9-ad56-1bf477408476\") " pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.589302 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/20007227-8914-4ec9-ad56-1bf477408476-var-lib\") pod \"ovn-controller-ovs-tg9tq\" (UID: \"20007227-8914-4ec9-ad56-1bf477408476\") " pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.589423 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/20007227-8914-4ec9-ad56-1bf477408476-var-log\") pod \"ovn-controller-ovs-tg9tq\" (UID: \"20007227-8914-4ec9-ad56-1bf477408476\") " pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.602252 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-rjbtf"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.644340 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/20007227-8914-4ec9-ad56-1bf477408476-scripts\") pod \"ovn-controller-ovs-tg9tq\" (UID: \"20007227-8914-4ec9-ad56-1bf477408476\") " pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.645694 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rxgz\" (UniqueName: \"kubernetes.io/projected/20007227-8914-4ec9-ad56-1bf477408476-kube-api-access-5rxgz\") pod \"ovn-controller-ovs-tg9tq\" (UID: \"20007227-8914-4ec9-ad56-1bf477408476\") " pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.659640 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.687692 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8e865e8b-7723-4aed-b51c-ce7a8da59d13","Type":"ContainerStarted","Data":"499823e6026b73c90021591cf3b6c9354fc21c44ed9e857cb318f9b4045b8f62"}
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.693608 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5b848cfc-5296-423c-9a02-45bf4c2c850b-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.694005 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b848cfc-5296-423c-9a02-45bf4c2c850b-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.694161 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.694365 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b848cfc-5296-423c-9a02-45bf4c2c850b-config\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.694717 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b848cfc-5296-423c-9a02-45bf4c2c850b-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.694879 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tswft\" (UniqueName: \"kubernetes.io/projected/5b848cfc-5296-423c-9a02-45bf4c2c850b-kube-api-access-tswft\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.695210 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5b848cfc-5296-423c-9a02-45bf4c2c850b-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.695372 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b848cfc-5296-423c-9a02-45bf4c2c850b-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.798989 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.799341 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b848cfc-5296-423c-9a02-45bf4c2c850b-config\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.799365 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b848cfc-5296-423c-9a02-45bf4c2c850b-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.799406 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tswft\" (UniqueName: \"kubernetes.io/projected/5b848cfc-5296-423c-9a02-45bf4c2c850b-kube-api-access-tswft\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.799480 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5b848cfc-5296-423c-9a02-45bf4c2c850b-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.799517 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b848cfc-5296-423c-9a02-45bf4c2c850b-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.799657 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5b848cfc-5296-423c-9a02-45bf4c2c850b-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.799824 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b848cfc-5296-423c-9a02-45bf4c2c850b-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.799982 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.806540 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b848cfc-5296-423c-9a02-45bf4c2c850b-config\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.806571 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b848cfc-5296-423c-9a02-45bf4c2c850b-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.806901 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5b848cfc-5296-423c-9a02-45bf4c2c850b-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.807787 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5b848cfc-5296-423c-9a02-45bf4c2c850b-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.813758 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b848cfc-5296-423c-9a02-45bf4c2c850b-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.818064 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b848cfc-5296-423c-9a02-45bf4c2c850b-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.867494 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tswft\" (UniqueName: \"kubernetes.io/projected/5b848cfc-5296-423c-9a02-45bf4c2c850b-kube-api-access-tswft\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:30 crc kubenswrapper[4703]: I0130 12:16:30.911617 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5b848cfc-5296-423c-9a02-45bf4c2c850b\") " pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:31 crc kubenswrapper[4703]: I0130 12:16:31.148786 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Jan 30 12:16:32 crc kubenswrapper[4703]: I0130 12:16:32.180614 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-rjbtf"]
Jan 30 12:16:32 crc kubenswrapper[4703]: I0130 12:16:32.783432 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-rjbtf" event={"ID":"fd146d96-e737-48a6-a3e4-d414913da90f","Type":"ContainerStarted","Data":"5932d5a8339a78a9a259745e376d1c73087026e88d1d476b9f0afa4a78d17219"}
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.201526 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-tg9tq"]
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.417334 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-jnlms"]
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.419235 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.426495 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.446376 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-jnlms"]
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.484598 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/64bd0ec4-dbb4-4b67-b6ae-312c7667f598-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-jnlms\" (UID: \"64bd0ec4-dbb4-4b67-b6ae-312c7667f598\") " pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.484692 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64bd0ec4-dbb4-4b67-b6ae-312c7667f598-combined-ca-bundle\") pod \"ovn-controller-metrics-jnlms\" (UID: \"64bd0ec4-dbb4-4b67-b6ae-312c7667f598\") " pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.484749 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/64bd0ec4-dbb4-4b67-b6ae-312c7667f598-ovn-rundir\") pod \"ovn-controller-metrics-jnlms\" (UID: \"64bd0ec4-dbb4-4b67-b6ae-312c7667f598\") " pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.484785 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64bd0ec4-dbb4-4b67-b6ae-312c7667f598-config\") pod \"ovn-controller-metrics-jnlms\" (UID: \"64bd0ec4-dbb4-4b67-b6ae-312c7667f598\") " pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.484827 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nd6mw\" (UniqueName: \"kubernetes.io/projected/64bd0ec4-dbb4-4b67-b6ae-312c7667f598-kube-api-access-nd6mw\") pod \"ovn-controller-metrics-jnlms\" (UID: \"64bd0ec4-dbb4-4b67-b6ae-312c7667f598\") " pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.484862 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/64bd0ec4-dbb4-4b67-b6ae-312c7667f598-ovs-rundir\") pod \"ovn-controller-metrics-jnlms\" (UID: \"64bd0ec4-dbb4-4b67-b6ae-312c7667f598\") " pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.586989 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/64bd0ec4-dbb4-4b67-b6ae-312c7667f598-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-jnlms\" (UID: \"64bd0ec4-dbb4-4b67-b6ae-312c7667f598\") " pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.587067 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64bd0ec4-dbb4-4b67-b6ae-312c7667f598-combined-ca-bundle\") pod \"ovn-controller-metrics-jnlms\" (UID: \"64bd0ec4-dbb4-4b67-b6ae-312c7667f598\") " pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.587111 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/64bd0ec4-dbb4-4b67-b6ae-312c7667f598-ovn-rundir\") pod \"ovn-controller-metrics-jnlms\" (UID: \"64bd0ec4-dbb4-4b67-b6ae-312c7667f598\") " pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.587155 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64bd0ec4-dbb4-4b67-b6ae-312c7667f598-config\") pod \"ovn-controller-metrics-jnlms\" (UID: \"64bd0ec4-dbb4-4b67-b6ae-312c7667f598\") " pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.587186 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nd6mw\" (UniqueName: \"kubernetes.io/projected/64bd0ec4-dbb4-4b67-b6ae-312c7667f598-kube-api-access-nd6mw\") pod \"ovn-controller-metrics-jnlms\" (UID: \"64bd0ec4-dbb4-4b67-b6ae-312c7667f598\") " pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.587212 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/64bd0ec4-dbb4-4b67-b6ae-312c7667f598-ovs-rundir\") pod \"ovn-controller-metrics-jnlms\" (UID: \"64bd0ec4-dbb4-4b67-b6ae-312c7667f598\") " pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.588160 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/64bd0ec4-dbb4-4b67-b6ae-312c7667f598-ovs-rundir\") pod \"ovn-controller-metrics-jnlms\" (UID: \"64bd0ec4-dbb4-4b67-b6ae-312c7667f598\") " pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.591198 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/64bd0ec4-dbb4-4b67-b6ae-312c7667f598-ovn-rundir\") pod \"ovn-controller-metrics-jnlms\" (UID: \"64bd0ec4-dbb4-4b67-b6ae-312c7667f598\") " pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.592625 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/64bd0ec4-dbb4-4b67-b6ae-312c7667f598-config\") pod \"ovn-controller-metrics-jnlms\" (UID: \"64bd0ec4-dbb4-4b67-b6ae-312c7667f598\") " pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.606331 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/64bd0ec4-dbb4-4b67-b6ae-312c7667f598-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-jnlms\" (UID: \"64bd0ec4-dbb4-4b67-b6ae-312c7667f598\") " pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.612706 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64bd0ec4-dbb4-4b67-b6ae-312c7667f598-combined-ca-bundle\") pod \"ovn-controller-metrics-jnlms\" (UID: \"64bd0ec4-dbb4-4b67-b6ae-312c7667f598\") " pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.667581 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nd6mw\" (UniqueName: \"kubernetes.io/projected/64bd0ec4-dbb4-4b67-b6ae-312c7667f598-kube-api-access-nd6mw\") pod \"ovn-controller-metrics-jnlms\" (UID: \"64bd0ec4-dbb4-4b67-b6ae-312c7667f598\") " pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:33 crc kubenswrapper[4703]: I0130 12:16:33.856246 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-jnlms"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.065342 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.298794 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"]
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.344761 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.345093 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.352018 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-lwgl6"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.352276 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.352916 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.353811 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts"
Jan 30 12:16:34 crc kubenswrapper[4703]: W0130 12:16:34.392298 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b848cfc_5296_423c_9a02_45bf4c2c850b.slice/crio-a73d37ddd2936e6b3926e5f368b5419edd16b014e3e88fbdfba9c797ff038ba2 WatchSource:0}: Error finding container a73d37ddd2936e6b3926e5f368b5419edd16b014e3e88fbdfba9c797ff038ba2: Status 404 returned error can't find the container with id a73d37ddd2936e6b3926e5f368b5419edd16b014e3e88fbdfba9c797ff038ba2
Jan 30 12:16:34 crc kubenswrapper[4703]: W0130 12:16:34.408262 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod20007227_8914_4ec9_ad56_1bf477408476.slice/crio-bd5798280bd756953432e11c50a243991f2b171849e6284fb8ecd3d8923d7068 WatchSource:0}: Error finding container bd5798280bd756953432e11c50a243991f2b171849e6284fb8ecd3d8923d7068: Status 404 returned error can't find the container with id bd5798280bd756953432e11c50a243991f2b171849e6284fb8ecd3d8923d7068
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.439728 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.439798 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.439839 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-config\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.439860 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.439887 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.439920 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.440068 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56t9q\" (UniqueName: \"kubernetes.io/projected/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-kube-api-access-56t9q\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.440109 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.543570 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.543637 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-config\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.543672 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.543708 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.543738 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.543901 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56t9q\" (UniqueName: \"kubernetes.io/projected/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-kube-api-access-56t9q\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.543934 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.544061 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.545453 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.545828 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.547734 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-config\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.551281 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.553975 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.555708 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.560827 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0"
Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.563671 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56t9q\" (UniqueName: \"kubernetes.io/projected/8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c-kube-api-access-56t9q\") pod \"ovsdbserver-sb-0\" (UID: 
\"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0" Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.576476 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c\") " pod="openstack/ovsdbserver-sb-0" Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.729309 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.830308 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-tg9tq" event={"ID":"20007227-8914-4ec9-ad56-1bf477408476","Type":"ContainerStarted","Data":"bd5798280bd756953432e11c50a243991f2b171849e6284fb8ecd3d8923d7068"} Jan 30 12:16:34 crc kubenswrapper[4703]: I0130 12:16:34.835600 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5b848cfc-5296-423c-9a02-45bf4c2c850b","Type":"ContainerStarted","Data":"a73d37ddd2936e6b3926e5f368b5419edd16b014e3e88fbdfba9c797ff038ba2"} Jan 30 12:16:42 crc kubenswrapper[4703]: I0130 12:16:42.823142 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:16:42 crc kubenswrapper[4703]: I0130 12:16:42.823698 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:16:48 crc kubenswrapper[4703]: I0130 12:16:48.467794 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-jnlms"] Jan 30 12:16:50 crc kubenswrapper[4703]: E0130 12:16:50.186313 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Jan 30 12:16:50 crc kubenswrapper[4703]: E0130 12:16:50.187392 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-n94qd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:16:50 crc kubenswrapper[4703]: E0130 12:16:50.188571 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" Jan 30 12:16:50 crc kubenswrapper[4703]: E0130 12:16:50.984968 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" Jan 30 12:16:59 crc kubenswrapper[4703]: E0130 12:16:59.837940 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Jan 30 12:16:59 crc kubenswrapper[4703]: E0130 12:16:59.841020 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- 
/usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:ncch99h584h9dh695h58fh656h699h77hcch5d8h88h5fbh5b8h59fh54dh654h57bh5ddh57dh8fhfch5c8h68ch54h65dh8dh658hc6h5c8h97h8bq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wzkw9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(a5650e0f-dfd5-4fc5-a728-b1eab91a0d23): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:16:59 crc kubenswrapper[4703]: E0130 12:16:59.842913 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="a5650e0f-dfd5-4fc5-a728-b1eab91a0d23" Jan 30 12:16:59 crc kubenswrapper[4703]: E0130 12:16:59.860818 4703 log.go:32] 
"PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Jan 30 12:16:59 crc kubenswrapper[4703]: E0130 12:16:59.861154 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s5zj5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(e5c696fa-999f-48c2-bf1a-e015ec7e7ab1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:16:59 crc kubenswrapper[4703]: E0130 12:16:59.862451 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="e5c696fa-999f-48c2-bf1a-e015ec7e7ab1" Jan 30 12:16:59 crc kubenswrapper[4703]: E0130 12:16:59.872001 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Jan 30 12:16:59 crc kubenswrapper[4703]: E0130 12:16:59.872173 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 
/var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gbd8j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(0a06c5c6-2219-4e79-ae66-2d706ce1e8e5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:16:59 crc kubenswrapper[4703]: E0130 12:16:59.873386 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" Jan 30 12:17:00 crc kubenswrapper[4703]: I0130 12:17:00.059940 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-jnlms" event={"ID":"64bd0ec4-dbb4-4b67-b6ae-312c7667f598","Type":"ContainerStarted","Data":"5a48496e30ed45a0a238da8fb79ab7962d806a126cd35f2078bb35c81dad1cdd"} Jan 30 12:17:00 crc kubenswrapper[4703]: E0130 12:17:00.061629 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" 
pod="openstack/openstack-cell1-galera-0" podUID="e5c696fa-999f-48c2-bf1a-e015ec7e7ab1" Jan 30 12:17:00 crc kubenswrapper[4703]: E0130 12:17:00.062551 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="a5650e0f-dfd5-4fc5-a728-b1eab91a0d23" Jan 30 12:17:00 crc kubenswrapper[4703]: E0130 12:17:00.063478 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" Jan 30 12:17:00 crc kubenswrapper[4703]: E0130 12:17:00.155812 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified" Jan 30 12:17:00 crc kubenswrapper[4703]: E0130 12:17:00.156587 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovn-controller,Image:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,Command:[ovn-controller --pidfile unix:/run/openvswitch/db.sock --certificate=/etc/pki/tls/certs/ovndb.crt --private-key=/etc/pki/tls/private/ovndb.key --ca-cert=/etc/pki/tls/certs/ovndbca.crt],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n698h598h5fbhdch68bh685h559h66dhb7h5b4h77h87h647h5cbh588h59fh67h59h545hdbhdh549h5b6h5bh5dch8h68bh5c6h647h89h56dh678q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run-ovn,ReadOnly:false,MountPath:/var/run/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log-ovn,ReadOnly:false,MountPath:/var/log/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-286j4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_liveness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSec
onds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_readiness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/share/ovn/scripts/ovn-ctl stop_controller],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-rjbtf_openstack(fd146d96-e737-48a6-a3e4-d414913da90f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:17:00 crc kubenswrapper[4703]: E0130 12:17:00.158504 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-rjbtf" podUID="fd146d96-e737-48a6-a3e4-d414913da90f" Jan 30 12:17:01 crc kubenswrapper[4703]: E0130 12:17:01.091993 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified\\\"\"" pod="openstack/ovn-controller-rjbtf" podUID="fd146d96-e737-48a6-a3e4-d414913da90f" Jan 30 12:17:05 crc kubenswrapper[4703]: E0130 12:17:05.585706 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 30 12:17:05 crc kubenswrapper[4703]: E0130 12:17:05.586812 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nslw6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-qmg2h_openstack(1f2a65f3-c251-40ad-9900-2b96f6298f79): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:17:05 crc kubenswrapper[4703]: E0130 12:17:05.587884 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-qmg2h" podUID="1f2a65f3-c251-40ad-9900-2b96f6298f79" Jan 30 12:17:05 crc kubenswrapper[4703]: E0130 12:17:05.650562 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 30 12:17:05 crc kubenswrapper[4703]: E0130 12:17:05.650872 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nfdh5dfhb6h64h676hc4h78h97h669h54chfbh696hb5h54bh5d4h6bh64h644h677h584h5cbh698h9dh5bbh5f8h5b8hcdh644h5c7h694hbfh589q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jllgf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5ccc8479f9-77d6s_openstack(dfd6f4da-4223-4439-83d7-3204e7ad5803): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:17:05 crc kubenswrapper[4703]: E0130 12:17:05.652658 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-5ccc8479f9-77d6s" podUID="dfd6f4da-4223-4439-83d7-3204e7ad5803" Jan 30 12:17:05 crc kubenswrapper[4703]: E0130 12:17:05.857019 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified" Jan 30 12:17:05 crc kubenswrapper[4703]: E0130 12:17:05.857381 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:ovsdbserver-nb,Image:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,Command:[/usr/bin/dumb-init],Args:[/usr/local/bin/container-scripts/setup.sh],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5c5hdfh576h64dh589h66bh66fh649hb8h56h699h74hb6h667h67hdhf5h5f5hf7hcfh54h5fdh579h5ddh9ch594hf4h66dhffhcch64dh98q,ValueFrom:nil,},EnvVar{Name:OVN_LOGDIR,Value:/tmp,ValueFrom:nil,},EnvVar{Name:OVN_RUNDIR,Value:/tmp,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovndbcluster-nb-etc-ovn,ReadOnly:false,MountPath:/etc/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tswft,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/cleanup.sh],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:20,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
ovsdbserver-nb-0_openstack(5b848cfc-5296-423c-9a02-45bf4c2c850b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:17:05 crc kubenswrapper[4703]: E0130 12:17:05.946151 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 30 12:17:05 crc kubenswrapper[4703]: E0130 12:17:05.946881 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2hcjd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-7lrrn_openstack(df2cb2b8-20fb-4df2-bf4d-499d73faa55d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:17:05 crc kubenswrapper[4703]: E0130 12:17:05.948382 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-7lrrn" podUID="df2cb2b8-20fb-4df2-bf4d-499d73faa55d" Jan 30 12:17:06 crc kubenswrapper[4703]: E0130 12:17:06.072485 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 30 12:17:06 crc kubenswrapper[4703]: E0130 12:17:06.072764 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-d2rb8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-w2bq4_openstack(cd258110-c376-4e35-a99c-8ed43e5d7631): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:17:06 crc kubenswrapper[4703]: E0130 12:17:06.074086 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-w2bq4" podUID="cd258110-c376-4e35-a99c-8ed43e5d7631" Jan 30 12:17:06 crc kubenswrapper[4703]: E0130 12:17:06.144542 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-5ccc8479f9-77d6s" podUID="dfd6f4da-4223-4439-83d7-3204e7ad5803" Jan 30 12:17:06 crc kubenswrapper[4703]: E0130 12:17:06.144939 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-qmg2h" podUID="1f2a65f3-c251-40ad-9900-2b96f6298f79" Jan 30 12:17:06 crc kubenswrapper[4703]: I0130 12:17:06.486430 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/ovsdbserver-sb-0"] Jan 30 12:17:06 crc kubenswrapper[4703]: E0130 12:17:06.780980 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Jan 30 12:17:06 crc kubenswrapper[4703]: E0130 12:17:06.781115 4703 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0" Jan 30 12:17:06 crc kubenswrapper[4703]: E0130 12:17:06.781384 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2cbff,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(7847f607-512e-440a-af08-8fc3104621b6): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 30 12:17:06 crc kubenswrapper[4703]: E0130 12:17:06.782680 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="7847f607-512e-440a-af08-8fc3104621b6" Jan 30 12:17:06 crc kubenswrapper[4703]: W0130 12:17:06.827136 4703 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c0494a8_e5f2_42bf_bcd4_f81c0f5f356c.slice/crio-be7ff6873346269f8d20503db3d800f38c29f96bdef47919fbf40729c8315b94 WatchSource:0}: Error finding container be7ff6873346269f8d20503db3d800f38c29f96bdef47919fbf40729c8315b94: Status 404 returned error can't find the container with id be7ff6873346269f8d20503db3d800f38c29f96bdef47919fbf40729c8315b94 Jan 30 12:17:06 crc kubenswrapper[4703]: I0130 12:17:06.887005 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-7lrrn" Jan 30 12:17:06 crc kubenswrapper[4703]: I0130 12:17:06.896170 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-w2bq4" Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.013402 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cd258110-c376-4e35-a99c-8ed43e5d7631-dns-svc\") pod \"cd258110-c376-4e35-a99c-8ed43e5d7631\" (UID: \"cd258110-c376-4e35-a99c-8ed43e5d7631\") " Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.013595 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd258110-c376-4e35-a99c-8ed43e5d7631-config\") pod \"cd258110-c376-4e35-a99c-8ed43e5d7631\" (UID: \"cd258110-c376-4e35-a99c-8ed43e5d7631\") " Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.013638 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2hcjd\" (UniqueName: \"kubernetes.io/projected/df2cb2b8-20fb-4df2-bf4d-499d73faa55d-kube-api-access-2hcjd\") pod \"df2cb2b8-20fb-4df2-bf4d-499d73faa55d\" (UID: \"df2cb2b8-20fb-4df2-bf4d-499d73faa55d\") " Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.013782 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df2cb2b8-20fb-4df2-bf4d-499d73faa55d-config\") pod \"df2cb2b8-20fb-4df2-bf4d-499d73faa55d\" (UID: \"df2cb2b8-20fb-4df2-bf4d-499d73faa55d\") " Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.013853 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2rb8\" (UniqueName: \"kubernetes.io/projected/cd258110-c376-4e35-a99c-8ed43e5d7631-kube-api-access-d2rb8\") pod \"cd258110-c376-4e35-a99c-8ed43e5d7631\" (UID: \"cd258110-c376-4e35-a99c-8ed43e5d7631\") " Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.014300 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd258110-c376-4e35-a99c-8ed43e5d7631-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cd258110-c376-4e35-a99c-8ed43e5d7631" (UID: "cd258110-c376-4e35-a99c-8ed43e5d7631"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.014406 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df2cb2b8-20fb-4df2-bf4d-499d73faa55d-config" (OuterVolumeSpecName: "config") pod "df2cb2b8-20fb-4df2-bf4d-499d73faa55d" (UID: "df2cb2b8-20fb-4df2-bf4d-499d73faa55d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.014876 4703 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cd258110-c376-4e35-a99c-8ed43e5d7631-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.014901 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df2cb2b8-20fb-4df2-bf4d-499d73faa55d-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.015271 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cd258110-c376-4e35-a99c-8ed43e5d7631-config" (OuterVolumeSpecName: "config") pod "cd258110-c376-4e35-a99c-8ed43e5d7631" (UID: "cd258110-c376-4e35-a99c-8ed43e5d7631"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.021459 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df2cb2b8-20fb-4df2-bf4d-499d73faa55d-kube-api-access-2hcjd" (OuterVolumeSpecName: "kube-api-access-2hcjd") pod "df2cb2b8-20fb-4df2-bf4d-499d73faa55d" (UID: "df2cb2b8-20fb-4df2-bf4d-499d73faa55d"). InnerVolumeSpecName "kube-api-access-2hcjd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.028487 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd258110-c376-4e35-a99c-8ed43e5d7631-kube-api-access-d2rb8" (OuterVolumeSpecName: "kube-api-access-d2rb8") pod "cd258110-c376-4e35-a99c-8ed43e5d7631" (UID: "cd258110-c376-4e35-a99c-8ed43e5d7631"). InnerVolumeSpecName "kube-api-access-d2rb8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.116792 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2rb8\" (UniqueName: \"kubernetes.io/projected/cd258110-c376-4e35-a99c-8ed43e5d7631-kube-api-access-d2rb8\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.116848 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cd258110-c376-4e35-a99c-8ed43e5d7631-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.116868 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2hcjd\" (UniqueName: \"kubernetes.io/projected/df2cb2b8-20fb-4df2-bf4d-499d73faa55d-kube-api-access-2hcjd\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.153565 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c","Type":"ContainerStarted","Data":"be7ff6873346269f8d20503db3d800f38c29f96bdef47919fbf40729c8315b94"} Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.155220 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-w2bq4" event={"ID":"cd258110-c376-4e35-a99c-8ed43e5d7631","Type":"ContainerDied","Data":"4843004a828eda8ff56e5a2861b9c6a266ef451f774fef0c65167995c8922ed2"} Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.155282 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-w2bq4" Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.158642 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-7lrrn" event={"ID":"df2cb2b8-20fb-4df2-bf4d-499d73faa55d","Type":"ContainerDied","Data":"c54568367a62580080e9edfbb2834644ae5a936a58300d074d1930bb3716e131"} Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.158718 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-7lrrn" Jan 30 12:17:07 crc kubenswrapper[4703]: E0130 12:17:07.160950 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0\\\"\"" pod="openstack/kube-state-metrics-0" podUID="7847f607-512e-440a-af08-8fc3104621b6" Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.239991 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7lrrn"] Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.261422 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-7lrrn"] Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.282082 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-w2bq4"] Jan 30 12:17:07 crc kubenswrapper[4703]: I0130 12:17:07.291077 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-w2bq4"] Jan 30 12:17:08 crc kubenswrapper[4703]: E0130 12:17:08.113499 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-nb-0" podUID="5b848cfc-5296-423c-9a02-45bf4c2c850b" Jan 30 12:17:08 crc kubenswrapper[4703]: I0130 12:17:08.183579 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f0ae0323-f870-408b-b688-df1b4e3e8da6","Type":"ContainerStarted","Data":"b49a7e7a33c5e2dee95a36c4f68438bcbf035474f4a9227cceb40194d08c7bb0"} Jan 30 12:17:08 crc kubenswrapper[4703]: I0130 12:17:08.192991 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5b848cfc-5296-423c-9a02-45bf4c2c850b","Type":"ContainerStarted","Data":"0f15c5025b2671cd91c631e0b79aba25f684e2a6a54f58c1eaf6cfc695333025"} Jan 30 12:17:08 crc kubenswrapper[4703]: E0130 12:17:08.196075 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="5b848cfc-5296-423c-9a02-45bf4c2c850b" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.100233 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd258110-c376-4e35-a99c-8ed43e5d7631" path="/var/lib/kubelet/pods/cd258110-c376-4e35-a99c-8ed43e5d7631/volumes" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.101544 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df2cb2b8-20fb-4df2-bf4d-499d73faa55d" path="/var/lib/kubelet/pods/df2cb2b8-20fb-4df2-bf4d-499d73faa55d/volumes" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.211638 4703 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c","Type":"ContainerStarted","Data":"f19cd48177cf8facd8ae06eed5f9a6b38422629ca8f975008c6a7fa7c1b09cdb"} Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.211705 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c","Type":"ContainerStarted","Data":"ed0b5c00f4e2c2036701370ba806db97f6f10056cd26dfd74a27f16e658be32b"} Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.214875 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-jnlms" event={"ID":"64bd0ec4-dbb4-4b67-b6ae-312c7667f598","Type":"ContainerStarted","Data":"cb4345fe674f883bab48adcc2cb256f47fdffa8e395422b271b44dc1f72243d6"} Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.216929 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-tg9tq" event={"ID":"20007227-8914-4ec9-ad56-1bf477408476","Type":"ContainerStarted","Data":"360cdf346ea8e97e0ad93d036c42246a5a376fb76d9055969f9bf8156010af19"} Jan 30 12:17:09 crc kubenswrapper[4703]: E0130 12:17:09.219533 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="5b848cfc-5296-423c-9a02-45bf4c2c850b" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.285737 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-jnlms" podStartSLOduration=28.337391775 podStartE2EDuration="36.285702237s" podCreationTimestamp="2026-01-30 12:16:33 +0000 UTC" firstStartedPulling="2026-01-30 12:16:59.87454676 +0000 UTC m=+1255.652368414" lastFinishedPulling="2026-01-30 12:17:07.822857222 +0000 UTC m=+1263.600678876" observedRunningTime="2026-01-30 12:17:09.271890655 +0000 UTC m=+1265.049712309" watchObservedRunningTime="2026-01-30 12:17:09.285702237 +0000 UTC m=+1265.063523891" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.286933 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=35.22149312 podStartE2EDuration="36.286900689s" podCreationTimestamp="2026-01-30 12:16:33 +0000 UTC" firstStartedPulling="2026-01-30 12:17:06.831003443 +0000 UTC m=+1262.608825097" lastFinishedPulling="2026-01-30 12:17:07.896411012 +0000 UTC m=+1263.674232666" observedRunningTime="2026-01-30 12:17:09.245206007 +0000 UTC m=+1265.023027671" watchObservedRunningTime="2026-01-30 12:17:09.286900689 +0000 UTC m=+1265.064722343" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.639342 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-qmg2h"] Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.705885 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-ncbx2"] Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.713765 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.719293 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.729638 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.737927 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-ncbx2"] Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.784753 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/647208fa-b97a-4dad-964e-75f8ab7df2e9-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-ncbx2\" (UID: \"647208fa-b97a-4dad-964e-75f8ab7df2e9\") " pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.784908 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzfns\" (UniqueName: \"kubernetes.io/projected/647208fa-b97a-4dad-964e-75f8ab7df2e9-kube-api-access-qzfns\") pod \"dnsmasq-dns-7fd796d7df-ncbx2\" (UID: \"647208fa-b97a-4dad-964e-75f8ab7df2e9\") " pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.785025 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/647208fa-b97a-4dad-964e-75f8ab7df2e9-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-ncbx2\" (UID: \"647208fa-b97a-4dad-964e-75f8ab7df2e9\") " pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.785088 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/647208fa-b97a-4dad-964e-75f8ab7df2e9-config\") pod \"dnsmasq-dns-7fd796d7df-ncbx2\" (UID: \"647208fa-b97a-4dad-964e-75f8ab7df2e9\") " pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.887369 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/647208fa-b97a-4dad-964e-75f8ab7df2e9-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-ncbx2\" (UID: \"647208fa-b97a-4dad-964e-75f8ab7df2e9\") " pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.887476 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/647208fa-b97a-4dad-964e-75f8ab7df2e9-config\") pod \"dnsmasq-dns-7fd796d7df-ncbx2\" (UID: \"647208fa-b97a-4dad-964e-75f8ab7df2e9\") " pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.888645 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/647208fa-b97a-4dad-964e-75f8ab7df2e9-dns-svc\") pod \"dnsmasq-dns-7fd796d7df-ncbx2\" (UID: \"647208fa-b97a-4dad-964e-75f8ab7df2e9\") " pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.888676 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/647208fa-b97a-4dad-964e-75f8ab7df2e9-config\") pod 
\"dnsmasq-dns-7fd796d7df-ncbx2\" (UID: \"647208fa-b97a-4dad-964e-75f8ab7df2e9\") " pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.888733 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/647208fa-b97a-4dad-964e-75f8ab7df2e9-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-ncbx2\" (UID: \"647208fa-b97a-4dad-964e-75f8ab7df2e9\") " pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.888798 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzfns\" (UniqueName: \"kubernetes.io/projected/647208fa-b97a-4dad-964e-75f8ab7df2e9-kube-api-access-qzfns\") pod \"dnsmasq-dns-7fd796d7df-ncbx2\" (UID: \"647208fa-b97a-4dad-964e-75f8ab7df2e9\") " pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.890572 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/647208fa-b97a-4dad-964e-75f8ab7df2e9-ovsdbserver-nb\") pod \"dnsmasq-dns-7fd796d7df-ncbx2\" (UID: \"647208fa-b97a-4dad-964e-75f8ab7df2e9\") " pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.900148 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-77d6s"] Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.977318 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-x8lvt"] Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.979196 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:09 crc kubenswrapper[4703]: I0130 12:17:09.985823 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.007414 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzfns\" (UniqueName: \"kubernetes.io/projected/647208fa-b97a-4dad-964e-75f8ab7df2e9-kube-api-access-qzfns\") pod \"dnsmasq-dns-7fd796d7df-ncbx2\" (UID: \"647208fa-b97a-4dad-964e-75f8ab7df2e9\") " pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.031242 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-x8lvt"] Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.047630 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.093546 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-x8lvt\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") " pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.093635 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-x8lvt\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") " pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.093710 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4lgq5\" (UniqueName: \"kubernetes.io/projected/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-kube-api-access-4lgq5\") pod \"dnsmasq-dns-86db49b7ff-x8lvt\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") " pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.093736 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-config\") pod \"dnsmasq-dns-86db49b7ff-x8lvt\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") " pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.093836 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-x8lvt\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") " pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.195512 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4lgq5\" (UniqueName: \"kubernetes.io/projected/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-kube-api-access-4lgq5\") pod \"dnsmasq-dns-86db49b7ff-x8lvt\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") " pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.196088 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-config\") pod \"dnsmasq-dns-86db49b7ff-x8lvt\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") " pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.196192 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-x8lvt\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") " pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.196347 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-x8lvt\" 
(UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") " pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.197548 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-config\") pod \"dnsmasq-dns-86db49b7ff-x8lvt\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") " pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.197845 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-x8lvt\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") " pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.198616 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-x8lvt\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") " pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.199757 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-x8lvt\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") " pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.201720 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-x8lvt\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") " pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.249664 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4lgq5\" (UniqueName: \"kubernetes.io/projected/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-kube-api-access-4lgq5\") pod \"dnsmasq-dns-86db49b7ff-x8lvt\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") " pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.289417 4703 generic.go:334] "Generic (PLEG): container finished" podID="20007227-8914-4ec9-ad56-1bf477408476" containerID="360cdf346ea8e97e0ad93d036c42246a5a376fb76d9055969f9bf8156010af19" exitCode=0 Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.289677 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-tg9tq" event={"ID":"20007227-8914-4ec9-ad56-1bf477408476","Type":"ContainerDied","Data":"360cdf346ea8e97e0ad93d036c42246a5a376fb76d9055969f9bf8156010af19"} Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.293975 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-qmg2h" event={"ID":"1f2a65f3-c251-40ad-9900-2b96f6298f79","Type":"ContainerDied","Data":"8ee0b934528a7bcdb6d69b57d6272984ce2fa6ee882e2ffc1e69acda45b5a8a1"} Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.294058 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8ee0b934528a7bcdb6d69b57d6272984ce2fa6ee882e2ffc1e69acda45b5a8a1" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.299296 4703 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b","Type":"ContainerStarted","Data":"fa64bfe6d18e3452d8e0ab152d7a52f728f181e2221def319dc3e38ef1880ae1"} Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.359011 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.460147 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-qmg2h" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.469508 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-ncbx2"] Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.475444 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-77d6s" Jan 30 12:17:10 crc kubenswrapper[4703]: W0130 12:17:10.477754 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod647208fa_b97a_4dad_964e_75f8ab7df2e9.slice/crio-38dbf53c1fe7962f62f30f0cdd2a538937461b8dc187bb3cd8d55c9500f9e131 WatchSource:0}: Error finding container 38dbf53c1fe7962f62f30f0cdd2a538937461b8dc187bb3cd8d55c9500f9e131: Status 404 returned error can't find the container with id 38dbf53c1fe7962f62f30f0cdd2a538937461b8dc187bb3cd8d55c9500f9e131 Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.513060 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f2a65f3-c251-40ad-9900-2b96f6298f79-config\") pod \"1f2a65f3-c251-40ad-9900-2b96f6298f79\" (UID: \"1f2a65f3-c251-40ad-9900-2b96f6298f79\") " Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.513167 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jllgf\" (UniqueName: \"kubernetes.io/projected/dfd6f4da-4223-4439-83d7-3204e7ad5803-kube-api-access-jllgf\") pod \"dfd6f4da-4223-4439-83d7-3204e7ad5803\" (UID: \"dfd6f4da-4223-4439-83d7-3204e7ad5803\") " Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.513421 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1f2a65f3-c251-40ad-9900-2b96f6298f79-dns-svc\") pod \"1f2a65f3-c251-40ad-9900-2b96f6298f79\" (UID: \"1f2a65f3-c251-40ad-9900-2b96f6298f79\") " Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.513461 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfd6f4da-4223-4439-83d7-3204e7ad5803-config\") pod \"dfd6f4da-4223-4439-83d7-3204e7ad5803\" (UID: \"dfd6f4da-4223-4439-83d7-3204e7ad5803\") " Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.513482 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nslw6\" (UniqueName: \"kubernetes.io/projected/1f2a65f3-c251-40ad-9900-2b96f6298f79-kube-api-access-nslw6\") pod \"1f2a65f3-c251-40ad-9900-2b96f6298f79\" (UID: \"1f2a65f3-c251-40ad-9900-2b96f6298f79\") " Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.513523 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dfd6f4da-4223-4439-83d7-3204e7ad5803-dns-svc\") pod \"dfd6f4da-4223-4439-83d7-3204e7ad5803\" (UID: 
\"dfd6f4da-4223-4439-83d7-3204e7ad5803\") " Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.514363 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f2a65f3-c251-40ad-9900-2b96f6298f79-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1f2a65f3-c251-40ad-9900-2b96f6298f79" (UID: "1f2a65f3-c251-40ad-9900-2b96f6298f79"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.514583 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dfd6f4da-4223-4439-83d7-3204e7ad5803-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "dfd6f4da-4223-4439-83d7-3204e7ad5803" (UID: "dfd6f4da-4223-4439-83d7-3204e7ad5803"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.514996 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dfd6f4da-4223-4439-83d7-3204e7ad5803-config" (OuterVolumeSpecName: "config") pod "dfd6f4da-4223-4439-83d7-3204e7ad5803" (UID: "dfd6f4da-4223-4439-83d7-3204e7ad5803"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.515265 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f2a65f3-c251-40ad-9900-2b96f6298f79-config" (OuterVolumeSpecName: "config") pod "1f2a65f3-c251-40ad-9900-2b96f6298f79" (UID: "1f2a65f3-c251-40ad-9900-2b96f6298f79"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.522825 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dfd6f4da-4223-4439-83d7-3204e7ad5803-kube-api-access-jllgf" (OuterVolumeSpecName: "kube-api-access-jllgf") pod "dfd6f4da-4223-4439-83d7-3204e7ad5803" (UID: "dfd6f4da-4223-4439-83d7-3204e7ad5803"). InnerVolumeSpecName "kube-api-access-jllgf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.526917 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f2a65f3-c251-40ad-9900-2b96f6298f79-kube-api-access-nslw6" (OuterVolumeSpecName: "kube-api-access-nslw6") pod "1f2a65f3-c251-40ad-9900-2b96f6298f79" (UID: "1f2a65f3-c251-40ad-9900-2b96f6298f79"). InnerVolumeSpecName "kube-api-access-nslw6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.616079 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1f2a65f3-c251-40ad-9900-2b96f6298f79-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.616133 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jllgf\" (UniqueName: \"kubernetes.io/projected/dfd6f4da-4223-4439-83d7-3204e7ad5803-kube-api-access-jllgf\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.616147 4703 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1f2a65f3-c251-40ad-9900-2b96f6298f79-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.616158 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dfd6f4da-4223-4439-83d7-3204e7ad5803-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.616172 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nslw6\" (UniqueName: \"kubernetes.io/projected/1f2a65f3-c251-40ad-9900-2b96f6298f79-kube-api-access-nslw6\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.616182 4703 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dfd6f4da-4223-4439-83d7-3204e7ad5803-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.717649 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-x8lvt"] Jan 30 12:17:10 crc kubenswrapper[4703]: I0130 12:17:10.731068 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 30 12:17:10 crc kubenswrapper[4703]: W0130 12:17:10.842912 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5fb4520e_7665_4d6b_8238_e3e3cf2a1306.slice/crio-9eedb1b7ddf1164dede5de5999cecbdd11be6199b5f5ad6d17bb415fa60ad974 WatchSource:0}: Error finding container 9eedb1b7ddf1164dede5de5999cecbdd11be6199b5f5ad6d17bb415fa60ad974: Status 404 returned error can't find the container with id 9eedb1b7ddf1164dede5de5999cecbdd11be6199b5f5ad6d17bb415fa60ad974 Jan 30 12:17:11 crc kubenswrapper[4703]: I0130 12:17:11.319771 4703 generic.go:334] "Generic (PLEG): container finished" podID="647208fa-b97a-4dad-964e-75f8ab7df2e9" containerID="f0c69f64e27fe1ad6c5fdfb1487a5c3702ad792ce64dbe866533402c89cf3bea" exitCode=0 Jan 30 12:17:11 crc kubenswrapper[4703]: I0130 12:17:11.320662 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" event={"ID":"647208fa-b97a-4dad-964e-75f8ab7df2e9","Type":"ContainerDied","Data":"f0c69f64e27fe1ad6c5fdfb1487a5c3702ad792ce64dbe866533402c89cf3bea"} Jan 30 12:17:11 crc kubenswrapper[4703]: I0130 12:17:11.320747 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" event={"ID":"647208fa-b97a-4dad-964e-75f8ab7df2e9","Type":"ContainerStarted","Data":"38dbf53c1fe7962f62f30f0cdd2a538937461b8dc187bb3cd8d55c9500f9e131"} Jan 30 12:17:11 crc kubenswrapper[4703]: I0130 12:17:11.330285 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-tg9tq" 
event={"ID":"20007227-8914-4ec9-ad56-1bf477408476","Type":"ContainerStarted","Data":"49e4fc844cb3ffcf122a9e1cc130f82fde0cf919eadc1e8dc4300b237b949891"} Jan 30 12:17:11 crc kubenswrapper[4703]: I0130 12:17:11.330354 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-tg9tq" event={"ID":"20007227-8914-4ec9-ad56-1bf477408476","Type":"ContainerStarted","Data":"0e99c1bc2f32593327fb31cd9fd831cdccf1876a8bd12a6d79c354e4c7e45820"} Jan 30 12:17:11 crc kubenswrapper[4703]: I0130 12:17:11.330548 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-tg9tq" Jan 30 12:17:11 crc kubenswrapper[4703]: I0130 12:17:11.330590 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-tg9tq" Jan 30 12:17:11 crc kubenswrapper[4703]: I0130 12:17:11.334527 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8e865e8b-7723-4aed-b51c-ce7a8da59d13","Type":"ContainerStarted","Data":"5ab22e287fac6accde52e20f9994a46a2ea01bfd85cc3568af2592e83a3bca79"} Jan 30 12:17:11 crc kubenswrapper[4703]: I0130 12:17:11.342379 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" event={"ID":"5fb4520e-7665-4d6b-8238-e3e3cf2a1306","Type":"ContainerStarted","Data":"9eedb1b7ddf1164dede5de5999cecbdd11be6199b5f5ad6d17bb415fa60ad974"} Jan 30 12:17:11 crc kubenswrapper[4703]: I0130 12:17:11.349228 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-77d6s" Jan 30 12:17:11 crc kubenswrapper[4703]: I0130 12:17:11.349374 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-77d6s" event={"ID":"dfd6f4da-4223-4439-83d7-3204e7ad5803","Type":"ContainerDied","Data":"045d96fc8724f288e4fcdd26edb5410b4b0e0232111db1eae3599a15d675f1a2"} Jan 30 12:17:11 crc kubenswrapper[4703]: I0130 12:17:11.349501 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-qmg2h" Jan 30 12:17:11 crc kubenswrapper[4703]: I0130 12:17:11.499751 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-tg9tq" podStartSLOduration=8.385257861 podStartE2EDuration="41.499719664s" podCreationTimestamp="2026-01-30 12:16:30 +0000 UTC" firstStartedPulling="2026-01-30 12:16:34.438543609 +0000 UTC m=+1230.216365273" lastFinishedPulling="2026-01-30 12:17:07.553005422 +0000 UTC m=+1263.330827076" observedRunningTime="2026-01-30 12:17:11.420712958 +0000 UTC m=+1267.198534612" watchObservedRunningTime="2026-01-30 12:17:11.499719664 +0000 UTC m=+1267.277541318" Jan 30 12:17:11 crc kubenswrapper[4703]: I0130 12:17:11.548348 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-77d6s"] Jan 30 12:17:11 crc kubenswrapper[4703]: I0130 12:17:11.567662 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-77d6s"] Jan 30 12:17:11 crc kubenswrapper[4703]: I0130 12:17:11.593736 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-qmg2h"] Jan 30 12:17:11 crc kubenswrapper[4703]: I0130 12:17:11.602299 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-qmg2h"] Jan 30 12:17:12 crc kubenswrapper[4703]: I0130 12:17:12.360497 4703 generic.go:334] "Generic (PLEG): container finished" podID="5fb4520e-7665-4d6b-8238-e3e3cf2a1306" containerID="380f775541011a72cfca0088e38a836dc71885f2bcebcc88d005a9f77516946d" exitCode=0 Jan 30 12:17:12 crc kubenswrapper[4703]: I0130 12:17:12.360584 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" event={"ID":"5fb4520e-7665-4d6b-8238-e3e3cf2a1306","Type":"ContainerDied","Data":"380f775541011a72cfca0088e38a836dc71885f2bcebcc88d005a9f77516946d"} Jan 30 12:17:12 crc kubenswrapper[4703]: I0130 12:17:12.369429 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" event={"ID":"647208fa-b97a-4dad-964e-75f8ab7df2e9","Type":"ContainerStarted","Data":"98e4ffddb224966d389a96df45189053c91d3d1dfbb2379caf22dfdab44aaab4"} Jan 30 12:17:12 crc kubenswrapper[4703]: I0130 12:17:12.369742 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" Jan 30 12:17:12 crc kubenswrapper[4703]: I0130 12:17:12.372611 4703 generic.go:334] "Generic (PLEG): container finished" podID="f0ae0323-f870-408b-b688-df1b4e3e8da6" containerID="b49a7e7a33c5e2dee95a36c4f68438bcbf035474f4a9227cceb40194d08c7bb0" exitCode=0 Jan 30 12:17:12 crc kubenswrapper[4703]: I0130 12:17:12.372796 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f0ae0323-f870-408b-b688-df1b4e3e8da6","Type":"ContainerDied","Data":"b49a7e7a33c5e2dee95a36c4f68438bcbf035474f4a9227cceb40194d08c7bb0"} Jan 30 12:17:12 crc kubenswrapper[4703]: I0130 12:17:12.412550 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" podStartSLOduration=2.91947563 podStartE2EDuration="3.412520867s" podCreationTimestamp="2026-01-30 12:17:09 +0000 UTC" firstStartedPulling="2026-01-30 12:17:10.490780095 +0000 UTC m=+1266.268601739" lastFinishedPulling="2026-01-30 12:17:10.983825322 +0000 UTC m=+1266.761646976" observedRunningTime="2026-01-30 12:17:12.407452761 +0000 UTC m=+1268.185274415" watchObservedRunningTime="2026-01-30 
12:17:12.412520867 +0000 UTC m=+1268.190342511" Jan 30 12:17:12 crc kubenswrapper[4703]: I0130 12:17:12.823258 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:17:12 crc kubenswrapper[4703]: I0130 12:17:12.823654 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:17:13 crc kubenswrapper[4703]: I0130 12:17:13.097935 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f2a65f3-c251-40ad-9900-2b96f6298f79" path="/var/lib/kubelet/pods/1f2a65f3-c251-40ad-9900-2b96f6298f79/volumes" Jan 30 12:17:13 crc kubenswrapper[4703]: I0130 12:17:13.098908 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dfd6f4da-4223-4439-83d7-3204e7ad5803" path="/var/lib/kubelet/pods/dfd6f4da-4223-4439-83d7-3204e7ad5803/volumes" Jan 30 12:17:13 crc kubenswrapper[4703]: I0130 12:17:13.386334 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" event={"ID":"5fb4520e-7665-4d6b-8238-e3e3cf2a1306","Type":"ContainerStarted","Data":"26b5b238812b8ade1655ca3ca8903878ea93de755f5814f3bb09bdd719cae71a"} Jan 30 12:17:13 crc kubenswrapper[4703]: I0130 12:17:13.388552 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f0ae0323-f870-408b-b688-df1b4e3e8da6","Type":"ContainerStarted","Data":"a3e17320c4f389d8ef5e3aa4817aea63702866abb87ca246fb4de490c609bae8"} Jan 30 12:17:13 crc kubenswrapper[4703]: I0130 12:17:13.415966 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" podStartSLOduration=3.977672375 podStartE2EDuration="4.415940398s" podCreationTimestamp="2026-01-30 12:17:09 +0000 UTC" firstStartedPulling="2026-01-30 12:17:10.846800895 +0000 UTC m=+1266.624622549" lastFinishedPulling="2026-01-30 12:17:11.285068918 +0000 UTC m=+1267.062890572" observedRunningTime="2026-01-30 12:17:13.406801832 +0000 UTC m=+1269.184623486" watchObservedRunningTime="2026-01-30 12:17:13.415940398 +0000 UTC m=+1269.193762052" Jan 30 12:17:13 crc kubenswrapper[4703]: I0130 12:17:13.440882 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=9.655146409 podStartE2EDuration="51.440826878s" podCreationTimestamp="2026-01-30 12:16:22 +0000 UTC" firstStartedPulling="2026-01-30 12:16:25.514471308 +0000 UTC m=+1221.292292962" lastFinishedPulling="2026-01-30 12:17:07.300151777 +0000 UTC m=+1263.077973431" observedRunningTime="2026-01-30 12:17:13.433430629 +0000 UTC m=+1269.211252293" watchObservedRunningTime="2026-01-30 12:17:13.440826878 +0000 UTC m=+1269.218648532" Jan 30 12:17:13 crc kubenswrapper[4703]: I0130 12:17:13.774444 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 30 12:17:13 crc kubenswrapper[4703]: I0130 12:17:13.822063 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 30 12:17:13 crc kubenswrapper[4703]: I0130 
12:17:13.927166 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 30 12:17:13 crc kubenswrapper[4703]: I0130 12:17:13.927817 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 30 12:17:14 crc kubenswrapper[4703]: I0130 12:17:14.403765 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:15 crc kubenswrapper[4703]: I0130 12:17:15.414730 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1","Type":"ContainerStarted","Data":"9db38ab146170b8e8fb021dc1b94e3597c057b944b32780a0ef2b84c900a161a"} Jan 30 12:17:16 crc kubenswrapper[4703]: I0130 12:17:16.426621 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5","Type":"ContainerStarted","Data":"481cd73606bf3baac9bc19db3659ddfb18cc1af4e8c3a636d9694a00c056d5b7"} Jan 30 12:17:16 crc kubenswrapper[4703]: I0130 12:17:16.428542 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-rjbtf" event={"ID":"fd146d96-e737-48a6-a3e4-d414913da90f","Type":"ContainerStarted","Data":"e5ebae21b1c7d17480341d3b0ac26c08c2cb665cd3a7963eaea1ac958924b4ab"} Jan 30 12:17:16 crc kubenswrapper[4703]: I0130 12:17:16.428766 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-rjbtf" Jan 30 12:17:16 crc kubenswrapper[4703]: I0130 12:17:16.430490 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"a5650e0f-dfd5-4fc5-a728-b1eab91a0d23","Type":"ContainerStarted","Data":"bfbb6fff32aa67c48d2788c215560cd4649531de8854927317d655f37248a712"} Jan 30 12:17:16 crc kubenswrapper[4703]: I0130 12:17:16.430821 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 30 12:17:16 crc kubenswrapper[4703]: I0130 12:17:16.488095 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-rjbtf" podStartSLOduration=3.289923831 podStartE2EDuration="46.488073717s" podCreationTimestamp="2026-01-30 12:16:30 +0000 UTC" firstStartedPulling="2026-01-30 12:16:32.479512974 +0000 UTC m=+1228.257334618" lastFinishedPulling="2026-01-30 12:17:15.67766285 +0000 UTC m=+1271.455484504" observedRunningTime="2026-01-30 12:17:16.482543788 +0000 UTC m=+1272.260365442" watchObservedRunningTime="2026-01-30 12:17:16.488073717 +0000 UTC m=+1272.265895371" Jan 30 12:17:16 crc kubenswrapper[4703]: I0130 12:17:16.506488 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=2.754680761 podStartE2EDuration="51.506460402s" podCreationTimestamp="2026-01-30 12:16:25 +0000 UTC" firstStartedPulling="2026-01-30 12:16:26.927810841 +0000 UTC m=+1222.705632495" lastFinishedPulling="2026-01-30 12:17:15.679590482 +0000 UTC m=+1271.457412136" observedRunningTime="2026-01-30 12:17:16.500903752 +0000 UTC m=+1272.278725406" watchObservedRunningTime="2026-01-30 12:17:16.506460402 +0000 UTC m=+1272.284282046" Jan 30 12:17:18 crc kubenswrapper[4703]: I0130 12:17:18.033888 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 30 12:17:18 crc kubenswrapper[4703]: I0130 12:17:18.126566 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/openstack-galera-0" Jan 30 12:17:18 crc kubenswrapper[4703]: I0130 12:17:18.451530 4703 generic.go:334] "Generic (PLEG): container finished" podID="e5c696fa-999f-48c2-bf1a-e015ec7e7ab1" containerID="9db38ab146170b8e8fb021dc1b94e3597c057b944b32780a0ef2b84c900a161a" exitCode=0 Jan 30 12:17:18 crc kubenswrapper[4703]: I0130 12:17:18.451605 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1","Type":"ContainerDied","Data":"9db38ab146170b8e8fb021dc1b94e3597c057b944b32780a0ef2b84c900a161a"} Jan 30 12:17:18 crc kubenswrapper[4703]: I0130 12:17:18.454742 4703 generic.go:334] "Generic (PLEG): container finished" podID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerID="5ab22e287fac6accde52e20f9994a46a2ea01bfd85cc3568af2592e83a3bca79" exitCode=0 Jan 30 12:17:18 crc kubenswrapper[4703]: I0130 12:17:18.454999 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8e865e8b-7723-4aed-b51c-ce7a8da59d13","Type":"ContainerDied","Data":"5ab22e287fac6accde52e20f9994a46a2ea01bfd85cc3568af2592e83a3bca79"} Jan 30 12:17:19 crc kubenswrapper[4703]: I0130 12:17:19.471568 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"e5c696fa-999f-48c2-bf1a-e015ec7e7ab1","Type":"ContainerStarted","Data":"2ae4c7f3f9264eb0fa770a05600d43d85bd944564c891c010701a047aafd4e70"} Jan 30 12:17:19 crc kubenswrapper[4703]: I0130 12:17:19.507923 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=-9223371980.34688 podStartE2EDuration="56.507896518s" podCreationTimestamp="2026-01-30 12:16:23 +0000 UTC" firstStartedPulling="2026-01-30 12:16:26.220349403 +0000 UTC m=+1221.998171057" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:17:19.49903298 +0000 UTC m=+1275.276854654" watchObservedRunningTime="2026-01-30 12:17:19.507896518 +0000 UTC m=+1275.285718172" Jan 30 12:17:20 crc kubenswrapper[4703]: I0130 12:17:20.050396 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" Jan 30 12:17:20 crc kubenswrapper[4703]: I0130 12:17:20.361440 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" Jan 30 12:17:20 crc kubenswrapper[4703]: I0130 12:17:20.443034 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-ncbx2"] Jan 30 12:17:20 crc kubenswrapper[4703]: I0130 12:17:20.485404 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" podUID="647208fa-b97a-4dad-964e-75f8ab7df2e9" containerName="dnsmasq-dns" containerID="cri-o://98e4ffddb224966d389a96df45189053c91d3d1dfbb2379caf22dfdab44aaab4" gracePeriod=10 Jan 30 12:17:20 crc kubenswrapper[4703]: I0130 12:17:20.663460 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.001492 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-6e21-account-create-update-6fr54"] Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.004095 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-6e21-account-create-update-6fr54" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.008376 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.048592 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-25gg4"] Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.050183 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-25gg4" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.075684 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-6e21-account-create-update-6fr54"] Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.085024 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-25gg4"] Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.131135 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05cf9256-9286-4808-9d1c-9e73e8fa2860-operator-scripts\") pod \"glance-6e21-account-create-update-6fr54\" (UID: \"05cf9256-9286-4808-9d1c-9e73e8fa2860\") " pod="openstack/glance-6e21-account-create-update-6fr54" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.131233 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dp2t\" (UniqueName: \"kubernetes.io/projected/05cf9256-9286-4808-9d1c-9e73e8fa2860-kube-api-access-9dp2t\") pod \"glance-6e21-account-create-update-6fr54\" (UID: \"05cf9256-9286-4808-9d1c-9e73e8fa2860\") " pod="openstack/glance-6e21-account-create-update-6fr54" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.193689 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.233627 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnbwk\" (UniqueName: \"kubernetes.io/projected/0763f436-e964-45c5-92f3-bb4cebeb01a9-kube-api-access-hnbwk\") pod \"glance-db-create-25gg4\" (UID: \"0763f436-e964-45c5-92f3-bb4cebeb01a9\") " pod="openstack/glance-db-create-25gg4" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.233750 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05cf9256-9286-4808-9d1c-9e73e8fa2860-operator-scripts\") pod \"glance-6e21-account-create-update-6fr54\" (UID: \"05cf9256-9286-4808-9d1c-9e73e8fa2860\") " pod="openstack/glance-6e21-account-create-update-6fr54" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.233790 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0763f436-e964-45c5-92f3-bb4cebeb01a9-operator-scripts\") pod \"glance-db-create-25gg4\" (UID: \"0763f436-e964-45c5-92f3-bb4cebeb01a9\") " pod="openstack/glance-db-create-25gg4" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.233844 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dp2t\" (UniqueName: \"kubernetes.io/projected/05cf9256-9286-4808-9d1c-9e73e8fa2860-kube-api-access-9dp2t\") pod \"glance-6e21-account-create-update-6fr54\" (UID: \"05cf9256-9286-4808-9d1c-9e73e8fa2860\") " pod="openstack/glance-6e21-account-create-update-6fr54" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.234815 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05cf9256-9286-4808-9d1c-9e73e8fa2860-operator-scripts\") pod \"glance-6e21-account-create-update-6fr54\" (UID: \"05cf9256-9286-4808-9d1c-9e73e8fa2860\") " pod="openstack/glance-6e21-account-create-update-6fr54" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.265380 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dp2t\" (UniqueName: \"kubernetes.io/projected/05cf9256-9286-4808-9d1c-9e73e8fa2860-kube-api-access-9dp2t\") pod \"glance-6e21-account-create-update-6fr54\" (UID: \"05cf9256-9286-4808-9d1c-9e73e8fa2860\") " pod="openstack/glance-6e21-account-create-update-6fr54" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.335002 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qzfns\" (UniqueName: \"kubernetes.io/projected/647208fa-b97a-4dad-964e-75f8ab7df2e9-kube-api-access-qzfns\") pod \"647208fa-b97a-4dad-964e-75f8ab7df2e9\" (UID: \"647208fa-b97a-4dad-964e-75f8ab7df2e9\") " Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.335224 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/647208fa-b97a-4dad-964e-75f8ab7df2e9-config\") pod \"647208fa-b97a-4dad-964e-75f8ab7df2e9\" (UID: \"647208fa-b97a-4dad-964e-75f8ab7df2e9\") " Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.335267 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/647208fa-b97a-4dad-964e-75f8ab7df2e9-ovsdbserver-nb\") pod \"647208fa-b97a-4dad-964e-75f8ab7df2e9\" 
(UID: \"647208fa-b97a-4dad-964e-75f8ab7df2e9\") " Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.335376 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/647208fa-b97a-4dad-964e-75f8ab7df2e9-dns-svc\") pod \"647208fa-b97a-4dad-964e-75f8ab7df2e9\" (UID: \"647208fa-b97a-4dad-964e-75f8ab7df2e9\") " Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.335693 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnbwk\" (UniqueName: \"kubernetes.io/projected/0763f436-e964-45c5-92f3-bb4cebeb01a9-kube-api-access-hnbwk\") pod \"glance-db-create-25gg4\" (UID: \"0763f436-e964-45c5-92f3-bb4cebeb01a9\") " pod="openstack/glance-db-create-25gg4" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.335781 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0763f436-e964-45c5-92f3-bb4cebeb01a9-operator-scripts\") pod \"glance-db-create-25gg4\" (UID: \"0763f436-e964-45c5-92f3-bb4cebeb01a9\") " pod="openstack/glance-db-create-25gg4" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.336825 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0763f436-e964-45c5-92f3-bb4cebeb01a9-operator-scripts\") pod \"glance-db-create-25gg4\" (UID: \"0763f436-e964-45c5-92f3-bb4cebeb01a9\") " pod="openstack/glance-db-create-25gg4" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.342742 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-6e21-account-create-update-6fr54" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.354401 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/647208fa-b97a-4dad-964e-75f8ab7df2e9-kube-api-access-qzfns" (OuterVolumeSpecName: "kube-api-access-qzfns") pod "647208fa-b97a-4dad-964e-75f8ab7df2e9" (UID: "647208fa-b97a-4dad-964e-75f8ab7df2e9"). InnerVolumeSpecName "kube-api-access-qzfns". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.369026 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnbwk\" (UniqueName: \"kubernetes.io/projected/0763f436-e964-45c5-92f3-bb4cebeb01a9-kube-api-access-hnbwk\") pod \"glance-db-create-25gg4\" (UID: \"0763f436-e964-45c5-92f3-bb4cebeb01a9\") " pod="openstack/glance-db-create-25gg4" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.374263 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-25gg4" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.437717 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qzfns\" (UniqueName: \"kubernetes.io/projected/647208fa-b97a-4dad-964e-75f8ab7df2e9-kube-api-access-qzfns\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.471278 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/647208fa-b97a-4dad-964e-75f8ab7df2e9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "647208fa-b97a-4dad-964e-75f8ab7df2e9" (UID: "647208fa-b97a-4dad-964e-75f8ab7df2e9"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.473474 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/647208fa-b97a-4dad-964e-75f8ab7df2e9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "647208fa-b97a-4dad-964e-75f8ab7df2e9" (UID: "647208fa-b97a-4dad-964e-75f8ab7df2e9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.492976 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/647208fa-b97a-4dad-964e-75f8ab7df2e9-config" (OuterVolumeSpecName: "config") pod "647208fa-b97a-4dad-964e-75f8ab7df2e9" (UID: "647208fa-b97a-4dad-964e-75f8ab7df2e9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.531214 4703 generic.go:334] "Generic (PLEG): container finished" podID="647208fa-b97a-4dad-964e-75f8ab7df2e9" containerID="98e4ffddb224966d389a96df45189053c91d3d1dfbb2379caf22dfdab44aaab4" exitCode=0 Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.531380 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" event={"ID":"647208fa-b97a-4dad-964e-75f8ab7df2e9","Type":"ContainerDied","Data":"98e4ffddb224966d389a96df45189053c91d3d1dfbb2379caf22dfdab44aaab4"} Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.531426 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" event={"ID":"647208fa-b97a-4dad-964e-75f8ab7df2e9","Type":"ContainerDied","Data":"38dbf53c1fe7962f62f30f0cdd2a538937461b8dc187bb3cd8d55c9500f9e131"} Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.531483 4703 scope.go:117] "RemoveContainer" containerID="98e4ffddb224966d389a96df45189053c91d3d1dfbb2379caf22dfdab44aaab4" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.531694 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7fd796d7df-ncbx2" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.539427 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5b848cfc-5296-423c-9a02-45bf4c2c850b","Type":"ContainerStarted","Data":"a420b77d16d4c420d09ffff99bbde1262bf327b28fa659ae09ef6f451acc3750"} Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.540666 4703 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/647208fa-b97a-4dad-964e-75f8ab7df2e9-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.540789 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/647208fa-b97a-4dad-964e-75f8ab7df2e9-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.540803 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/647208fa-b97a-4dad-964e-75f8ab7df2e9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.611831 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=6.343736778 podStartE2EDuration="52.611802553s" podCreationTimestamp="2026-01-30 12:16:29 +0000 UTC" firstStartedPulling="2026-01-30 12:16:34.408222073 +0000 UTC m=+1230.186043757" lastFinishedPulling="2026-01-30 12:17:20.676287878 +0000 UTC m=+1276.454109532" observedRunningTime="2026-01-30 12:17:21.607881967 +0000 UTC m=+1277.385703621" watchObservedRunningTime="2026-01-30 12:17:21.611802553 +0000 UTC m=+1277.389624207" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.618228 4703 scope.go:117] "RemoveContainer" containerID="f0c69f64e27fe1ad6c5fdfb1487a5c3702ad792ce64dbe866533402c89cf3bea" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.660314 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-ncbx2"] Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.669658 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7fd796d7df-ncbx2"] Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.766224 4703 scope.go:117] "RemoveContainer" containerID="98e4ffddb224966d389a96df45189053c91d3d1dfbb2379caf22dfdab44aaab4" Jan 30 12:17:21 crc kubenswrapper[4703]: E0130 12:17:21.767469 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98e4ffddb224966d389a96df45189053c91d3d1dfbb2379caf22dfdab44aaab4\": container with ID starting with 98e4ffddb224966d389a96df45189053c91d3d1dfbb2379caf22dfdab44aaab4 not found: ID does not exist" containerID="98e4ffddb224966d389a96df45189053c91d3d1dfbb2379caf22dfdab44aaab4" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.767572 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98e4ffddb224966d389a96df45189053c91d3d1dfbb2379caf22dfdab44aaab4"} err="failed to get container status \"98e4ffddb224966d389a96df45189053c91d3d1dfbb2379caf22dfdab44aaab4\": rpc error: code = NotFound desc = could not find container \"98e4ffddb224966d389a96df45189053c91d3d1dfbb2379caf22dfdab44aaab4\": container with ID starting with 98e4ffddb224966d389a96df45189053c91d3d1dfbb2379caf22dfdab44aaab4 not found: ID does not exist" Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 
Jan 30 12:17:21 crc kubenswrapper[4703]: E0130 12:17:21.767962 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0c69f64e27fe1ad6c5fdfb1487a5c3702ad792ce64dbe866533402c89cf3bea\": container with ID starting with f0c69f64e27fe1ad6c5fdfb1487a5c3702ad792ce64dbe866533402c89cf3bea not found: ID does not exist" containerID="f0c69f64e27fe1ad6c5fdfb1487a5c3702ad792ce64dbe866533402c89cf3bea"
Jan 30 12:17:21 crc kubenswrapper[4703]: I0130 12:17:21.768025 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0c69f64e27fe1ad6c5fdfb1487a5c3702ad792ce64dbe866533402c89cf3bea"} err="failed to get container status \"f0c69f64e27fe1ad6c5fdfb1487a5c3702ad792ce64dbe866533402c89cf3bea\": rpc error: code = NotFound desc = could not find container \"f0c69f64e27fe1ad6c5fdfb1487a5c3702ad792ce64dbe866533402c89cf3bea\": container with ID starting with f0c69f64e27fe1ad6c5fdfb1487a5c3702ad792ce64dbe866533402c89cf3bea not found: ID does not exist"
Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.052213 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-25gg4"]
Jan 30 12:17:22 crc kubenswrapper[4703]: W0130 12:17:22.067687 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod05cf9256_9286_4808_9d1c_9e73e8fa2860.slice/crio-dadc70a1264772a327929ba475ec120c58c00eae4ac7b8ac4b6f9ed3a8fae243 WatchSource:0}: Error finding container dadc70a1264772a327929ba475ec120c58c00eae4ac7b8ac4b6f9ed3a8fae243: Status 404 returned error can't find the container with id dadc70a1264772a327929ba475ec120c58c00eae4ac7b8ac4b6f9ed3a8fae243
Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.073987 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-6e21-account-create-update-6fr54"]
Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.110892 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-x7jrq"]
Jan 30 12:17:22 crc kubenswrapper[4703]: E0130 12:17:22.111716 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="647208fa-b97a-4dad-964e-75f8ab7df2e9" containerName="dnsmasq-dns"
Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.111741 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="647208fa-b97a-4dad-964e-75f8ab7df2e9" containerName="dnsmasq-dns"
Jan 30 12:17:22 crc kubenswrapper[4703]: E0130 12:17:22.111756 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="647208fa-b97a-4dad-964e-75f8ab7df2e9" containerName="init"
Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.111764 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="647208fa-b97a-4dad-964e-75f8ab7df2e9" containerName="init"
Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.111949 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="647208fa-b97a-4dad-964e-75f8ab7df2e9" containerName="dnsmasq-dns"
Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.113674 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-x7jrq" Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.119811 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.123361 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-x7jrq"] Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.150226 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.271539 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a778462-d253-404e-acde-17f65431a62b-operator-scripts\") pod \"root-account-create-update-x7jrq\" (UID: \"5a778462-d253-404e-acde-17f65431a62b\") " pod="openstack/root-account-create-update-x7jrq" Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.272053 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p962j\" (UniqueName: \"kubernetes.io/projected/5a778462-d253-404e-acde-17f65431a62b-kube-api-access-p962j\") pod \"root-account-create-update-x7jrq\" (UID: \"5a778462-d253-404e-acde-17f65431a62b\") " pod="openstack/root-account-create-update-x7jrq" Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.373678 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a778462-d253-404e-acde-17f65431a62b-operator-scripts\") pod \"root-account-create-update-x7jrq\" (UID: \"5a778462-d253-404e-acde-17f65431a62b\") " pod="openstack/root-account-create-update-x7jrq" Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.375289 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p962j\" (UniqueName: \"kubernetes.io/projected/5a778462-d253-404e-acde-17f65431a62b-kube-api-access-p962j\") pod \"root-account-create-update-x7jrq\" (UID: \"5a778462-d253-404e-acde-17f65431a62b\") " pod="openstack/root-account-create-update-x7jrq" Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.374970 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a778462-d253-404e-acde-17f65431a62b-operator-scripts\") pod \"root-account-create-update-x7jrq\" (UID: \"5a778462-d253-404e-acde-17f65431a62b\") " pod="openstack/root-account-create-update-x7jrq" Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.403324 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p962j\" (UniqueName: \"kubernetes.io/projected/5a778462-d253-404e-acde-17f65431a62b-kube-api-access-p962j\") pod \"root-account-create-update-x7jrq\" (UID: \"5a778462-d253-404e-acde-17f65431a62b\") " pod="openstack/root-account-create-update-x7jrq" Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.557966 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-6e21-account-create-update-6fr54" event={"ID":"05cf9256-9286-4808-9d1c-9e73e8fa2860","Type":"ContainerStarted","Data":"8f12f4da993d6766f823f8295f03593cb7d86f53d9ac64ba51601c08925c6e03"} Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.558024 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-6e21-account-create-update-6fr54" 
event={"ID":"05cf9256-9286-4808-9d1c-9e73e8fa2860","Type":"ContainerStarted","Data":"dadc70a1264772a327929ba475ec120c58c00eae4ac7b8ac4b6f9ed3a8fae243"} Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.566337 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-25gg4" event={"ID":"0763f436-e964-45c5-92f3-bb4cebeb01a9","Type":"ContainerStarted","Data":"961b8bfc95a8bebc04532d78d8606028a66b423b6f1fff22a433ac0b34fed470"} Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.566388 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-25gg4" event={"ID":"0763f436-e964-45c5-92f3-bb4cebeb01a9","Type":"ContainerStarted","Data":"86120efe9319036c98de526b6b49fb8253b07b9be5d8d67f8de111562502d76a"} Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.584056 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-6e21-account-create-update-6fr54" podStartSLOduration=2.584027234 podStartE2EDuration="2.584027234s" podCreationTimestamp="2026-01-30 12:17:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:17:22.579606206 +0000 UTC m=+1278.357427860" watchObservedRunningTime="2026-01-30 12:17:22.584027234 +0000 UTC m=+1278.361848888" Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.603455 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-25gg4" podStartSLOduration=2.603431217 podStartE2EDuration="2.603431217s" podCreationTimestamp="2026-01-30 12:17:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:17:22.599427439 +0000 UTC m=+1278.377249093" watchObservedRunningTime="2026-01-30 12:17:22.603431217 +0000 UTC m=+1278.381252871" Jan 30 12:17:22 crc kubenswrapper[4703]: I0130 12:17:22.646579 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-x7jrq" Jan 30 12:17:23 crc kubenswrapper[4703]: I0130 12:17:23.100929 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="647208fa-b97a-4dad-964e-75f8ab7df2e9" path="/var/lib/kubelet/pods/647208fa-b97a-4dad-964e-75f8ab7df2e9/volumes" Jan 30 12:17:23 crc kubenswrapper[4703]: I0130 12:17:23.205872 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-x7jrq"] Jan 30 12:17:23 crc kubenswrapper[4703]: W0130 12:17:23.222091 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5a778462_d253_404e_acde_17f65431a62b.slice/crio-e20dbb47e0ba0172c09627524de58303636d90a08a04ffc024f6ed21caa5113a WatchSource:0}: Error finding container e20dbb47e0ba0172c09627524de58303636d90a08a04ffc024f6ed21caa5113a: Status 404 returned error can't find the container with id e20dbb47e0ba0172c09627524de58303636d90a08a04ffc024f6ed21caa5113a Jan 30 12:17:23 crc kubenswrapper[4703]: I0130 12:17:23.579219 4703 generic.go:334] "Generic (PLEG): container finished" podID="05cf9256-9286-4808-9d1c-9e73e8fa2860" containerID="8f12f4da993d6766f823f8295f03593cb7d86f53d9ac64ba51601c08925c6e03" exitCode=0 Jan 30 12:17:23 crc kubenswrapper[4703]: I0130 12:17:23.579285 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-6e21-account-create-update-6fr54" event={"ID":"05cf9256-9286-4808-9d1c-9e73e8fa2860","Type":"ContainerDied","Data":"8f12f4da993d6766f823f8295f03593cb7d86f53d9ac64ba51601c08925c6e03"} Jan 30 12:17:23 crc kubenswrapper[4703]: I0130 12:17:23.582066 4703 generic.go:334] "Generic (PLEG): container finished" podID="0763f436-e964-45c5-92f3-bb4cebeb01a9" containerID="961b8bfc95a8bebc04532d78d8606028a66b423b6f1fff22a433ac0b34fed470" exitCode=0 Jan 30 12:17:23 crc kubenswrapper[4703]: I0130 12:17:23.582288 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-25gg4" event={"ID":"0763f436-e964-45c5-92f3-bb4cebeb01a9","Type":"ContainerDied","Data":"961b8bfc95a8bebc04532d78d8606028a66b423b6f1fff22a433ac0b34fed470"} Jan 30 12:17:23 crc kubenswrapper[4703]: I0130 12:17:23.585912 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"7847f607-512e-440a-af08-8fc3104621b6","Type":"ContainerStarted","Data":"15669b51c5fe1aa82ec8da8d695ffce6ef991203cc9db102d30a7a7359dec759"} Jan 30 12:17:23 crc kubenswrapper[4703]: I0130 12:17:23.587064 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 30 12:17:23 crc kubenswrapper[4703]: I0130 12:17:23.589274 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-x7jrq" event={"ID":"5a778462-d253-404e-acde-17f65431a62b","Type":"ContainerStarted","Data":"f22a6a175a2dc39c2b374372a89450bb61ba631f529ca883978751cf00e7dc4e"} Jan 30 12:17:23 crc kubenswrapper[4703]: I0130 12:17:23.589337 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-x7jrq" event={"ID":"5a778462-d253-404e-acde-17f65431a62b","Type":"ContainerStarted","Data":"e20dbb47e0ba0172c09627524de58303636d90a08a04ffc024f6ed21caa5113a"} Jan 30 12:17:23 crc kubenswrapper[4703]: I0130 12:17:23.643377 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=3.011771506 podStartE2EDuration="56.643343039s" 
podCreationTimestamp="2026-01-30 12:16:27 +0000 UTC" firstStartedPulling="2026-01-30 12:16:28.859617194 +0000 UTC m=+1224.637438848" lastFinishedPulling="2026-01-30 12:17:22.491188727 +0000 UTC m=+1278.269010381" observedRunningTime="2026-01-30 12:17:23.628376307 +0000 UTC m=+1279.406197961" watchObservedRunningTime="2026-01-30 12:17:23.643343039 +0000 UTC m=+1279.421164713" Jan 30 12:17:23 crc kubenswrapper[4703]: I0130 12:17:23.666993 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-x7jrq" podStartSLOduration=1.666966885 podStartE2EDuration="1.666966885s" podCreationTimestamp="2026-01-30 12:17:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:17:23.660682407 +0000 UTC m=+1279.438504071" watchObservedRunningTime="2026-01-30 12:17:23.666966885 +0000 UTC m=+1279.444788539" Jan 30 12:17:24 crc kubenswrapper[4703]: I0130 12:17:24.603780 4703 generic.go:334] "Generic (PLEG): container finished" podID="5a778462-d253-404e-acde-17f65431a62b" containerID="f22a6a175a2dc39c2b374372a89450bb61ba631f529ca883978751cf00e7dc4e" exitCode=0 Jan 30 12:17:24 crc kubenswrapper[4703]: I0130 12:17:24.603876 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-x7jrq" event={"ID":"5a778462-d253-404e-acde-17f65431a62b","Type":"ContainerDied","Data":"f22a6a175a2dc39c2b374372a89450bb61ba631f529ca883978751cf00e7dc4e"} Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.070869 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-l67m7"] Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.073462 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-l67m7" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.084016 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-l67m7"] Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.212627 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.213194 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.243688 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhzf4\" (UniqueName: \"kubernetes.io/projected/1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0-kube-api-access-rhzf4\") pod \"keystone-db-create-l67m7\" (UID: \"1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0\") " pod="openstack/keystone-db-create-l67m7" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.243896 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0-operator-scripts\") pod \"keystone-db-create-l67m7\" (UID: \"1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0\") " pod="openstack/keystone-db-create-l67m7" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.308408 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.308502 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.329593 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-ee05-account-create-update-dgl6z"] Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.331277 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-ee05-account-create-update-dgl6z" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.335291 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.339531 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-ee05-account-create-update-dgl6z"] Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.346470 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0-operator-scripts\") pod \"keystone-db-create-l67m7\" (UID: \"1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0\") " pod="openstack/keystone-db-create-l67m7" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.346588 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhzf4\" (UniqueName: \"kubernetes.io/projected/1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0-kube-api-access-rhzf4\") pod \"keystone-db-create-l67m7\" (UID: \"1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0\") " pod="openstack/keystone-db-create-l67m7" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.347905 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0-operator-scripts\") pod \"keystone-db-create-l67m7\" (UID: \"1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0\") " pod="openstack/keystone-db-create-l67m7" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.395111 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhzf4\" (UniqueName: \"kubernetes.io/projected/1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0-kube-api-access-rhzf4\") pod \"keystone-db-create-l67m7\" (UID: \"1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0\") " pod="openstack/keystone-db-create-l67m7" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.420072 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-l67m7" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.448507 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxh2d\" (UniqueName: \"kubernetes.io/projected/f40c42c3-2575-425d-b80e-845f4a0a88b5-kube-api-access-pxh2d\") pod \"keystone-ee05-account-create-update-dgl6z\" (UID: \"f40c42c3-2575-425d-b80e-845f4a0a88b5\") " pod="openstack/keystone-ee05-account-create-update-dgl6z" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.449221 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f40c42c3-2575-425d-b80e-845f4a0a88b5-operator-scripts\") pod \"keystone-ee05-account-create-update-dgl6z\" (UID: \"f40c42c3-2575-425d-b80e-845f4a0a88b5\") " pod="openstack/keystone-ee05-account-create-update-dgl6z" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.523900 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.551330 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxh2d\" (UniqueName: \"kubernetes.io/projected/f40c42c3-2575-425d-b80e-845f4a0a88b5-kube-api-access-pxh2d\") pod \"keystone-ee05-account-create-update-dgl6z\" (UID: \"f40c42c3-2575-425d-b80e-845f4a0a88b5\") " pod="openstack/keystone-ee05-account-create-update-dgl6z" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.551449 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f40c42c3-2575-425d-b80e-845f4a0a88b5-operator-scripts\") pod \"keystone-ee05-account-create-update-dgl6z\" (UID: \"f40c42c3-2575-425d-b80e-845f4a0a88b5\") " pod="openstack/keystone-ee05-account-create-update-dgl6z" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.552685 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f40c42c3-2575-425d-b80e-845f4a0a88b5-operator-scripts\") pod \"keystone-ee05-account-create-update-dgl6z\" (UID: \"f40c42c3-2575-425d-b80e-845f4a0a88b5\") " pod="openstack/keystone-ee05-account-create-update-dgl6z" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.590503 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxh2d\" (UniqueName: \"kubernetes.io/projected/f40c42c3-2575-425d-b80e-845f4a0a88b5-kube-api-access-pxh2d\") pod \"keystone-ee05-account-create-update-dgl6z\" (UID: \"f40c42c3-2575-425d-b80e-845f4a0a88b5\") " pod="openstack/keystone-ee05-account-create-update-dgl6z" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.618997 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-kttq7"] Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.620491 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-kttq7" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.640236 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-kttq7"] Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.664764 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-ee05-account-create-update-dgl6z" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.725084 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-e48f-account-create-update-mkwjg"] Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.726866 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-e48f-account-create-update-mkwjg" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.734612 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.735342 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-e48f-account-create-update-mkwjg"] Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.755808 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zrtc\" (UniqueName: \"kubernetes.io/projected/47c89c69-b5ba-480b-bd12-1aeaaf6cbc01-kube-api-access-7zrtc\") pod \"placement-db-create-kttq7\" (UID: \"47c89c69-b5ba-480b-bd12-1aeaaf6cbc01\") " pod="openstack/placement-db-create-kttq7" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.755927 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47c89c69-b5ba-480b-bd12-1aeaaf6cbc01-operator-scripts\") pod \"placement-db-create-kttq7\" (UID: \"47c89c69-b5ba-480b-bd12-1aeaaf6cbc01\") " pod="openstack/placement-db-create-kttq7" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.820784 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.862109 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zrtc\" (UniqueName: \"kubernetes.io/projected/47c89c69-b5ba-480b-bd12-1aeaaf6cbc01-kube-api-access-7zrtc\") pod \"placement-db-create-kttq7\" (UID: \"47c89c69-b5ba-480b-bd12-1aeaaf6cbc01\") " pod="openstack/placement-db-create-kttq7" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.862260 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47c89c69-b5ba-480b-bd12-1aeaaf6cbc01-operator-scripts\") pod \"placement-db-create-kttq7\" (UID: \"47c89c69-b5ba-480b-bd12-1aeaaf6cbc01\") " pod="openstack/placement-db-create-kttq7" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.862315 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jws2\" (UniqueName: \"kubernetes.io/projected/107ee781-3758-4b91-81d2-f934501de19e-kube-api-access-7jws2\") pod \"placement-e48f-account-create-update-mkwjg\" (UID: \"107ee781-3758-4b91-81d2-f934501de19e\") " pod="openstack/placement-e48f-account-create-update-mkwjg" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.862455 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/107ee781-3758-4b91-81d2-f934501de19e-operator-scripts\") pod \"placement-e48f-account-create-update-mkwjg\" (UID: \"107ee781-3758-4b91-81d2-f934501de19e\") " pod="openstack/placement-e48f-account-create-update-mkwjg" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.864341 4703 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47c89c69-b5ba-480b-bd12-1aeaaf6cbc01-operator-scripts\") pod \"placement-db-create-kttq7\" (UID: \"47c89c69-b5ba-480b-bd12-1aeaaf6cbc01\") " pod="openstack/placement-db-create-kttq7" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.890256 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zrtc\" (UniqueName: \"kubernetes.io/projected/47c89c69-b5ba-480b-bd12-1aeaaf6cbc01-kube-api-access-7zrtc\") pod \"placement-db-create-kttq7\" (UID: \"47c89c69-b5ba-480b-bd12-1aeaaf6cbc01\") " pod="openstack/placement-db-create-kttq7" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.952427 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-kttq7" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.964710 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jws2\" (UniqueName: \"kubernetes.io/projected/107ee781-3758-4b91-81d2-f934501de19e-kube-api-access-7jws2\") pod \"placement-e48f-account-create-update-mkwjg\" (UID: \"107ee781-3758-4b91-81d2-f934501de19e\") " pod="openstack/placement-e48f-account-create-update-mkwjg" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.964818 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/107ee781-3758-4b91-81d2-f934501de19e-operator-scripts\") pod \"placement-e48f-account-create-update-mkwjg\" (UID: \"107ee781-3758-4b91-81d2-f934501de19e\") " pod="openstack/placement-e48f-account-create-update-mkwjg" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.965824 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/107ee781-3758-4b91-81d2-f934501de19e-operator-scripts\") pod \"placement-e48f-account-create-update-mkwjg\" (UID: \"107ee781-3758-4b91-81d2-f934501de19e\") " pod="openstack/placement-e48f-account-create-update-mkwjg" Jan 30 12:17:25 crc kubenswrapper[4703]: I0130 12:17:25.989732 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jws2\" (UniqueName: \"kubernetes.io/projected/107ee781-3758-4b91-81d2-f934501de19e-kube-api-access-7jws2\") pod \"placement-e48f-account-create-update-mkwjg\" (UID: \"107ee781-3758-4b91-81d2-f934501de19e\") " pod="openstack/placement-e48f-account-create-update-mkwjg" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.082244 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-e48f-account-create-update-mkwjg" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.206817 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.391196 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.392863 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.396588 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.396953 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.397162 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.407813 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-76vdf" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.425503 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.478856 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97678\" (UniqueName: \"kubernetes.io/projected/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-kube-api-access-97678\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.478990 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.479036 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.479265 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-config\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.479474 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.479516 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.479559 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-scripts\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: 
I0130 12:17:26.581915 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-scripts\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.582039 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97678\" (UniqueName: \"kubernetes.io/projected/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-kube-api-access-97678\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.582166 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.582209 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.582248 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-config\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.582293 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.582318 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.582818 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.583243 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-scripts\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.583661 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-config\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.588215 4703 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.588266 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.589949 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.667498 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97678\" (UniqueName: \"kubernetes.io/projected/6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea-kube-api-access-97678\") pod \"ovn-northd-0\" (UID: \"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea\") " pod="openstack/ovn-northd-0" Jan 30 12:17:26 crc kubenswrapper[4703]: I0130 12:17:26.721203 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.504171 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-create-fdwhn"] Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.506497 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-create-fdwhn" Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.515901 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-fdwhn"] Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.532909 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.609047 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dfaefc7-3070-4d23-815d-164d669de123-operator-scripts\") pod \"watcher-db-create-fdwhn\" (UID: \"2dfaefc7-3070-4d23-815d-164d669de123\") " pod="openstack/watcher-db-create-fdwhn" Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.609159 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4kj6\" (UniqueName: \"kubernetes.io/projected/2dfaefc7-3070-4d23-815d-164d669de123-kube-api-access-f4kj6\") pod \"watcher-db-create-fdwhn\" (UID: \"2dfaefc7-3070-4d23-815d-164d669de123\") " pod="openstack/watcher-db-create-fdwhn" Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.670336 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-badc-account-create-update-v5rg5"] Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.677507 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-badc-account-create-update-v5rg5" Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.693468 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-db-secret" Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.716088 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dfaefc7-3070-4d23-815d-164d669de123-operator-scripts\") pod \"watcher-db-create-fdwhn\" (UID: \"2dfaefc7-3070-4d23-815d-164d669de123\") " pod="openstack/watcher-db-create-fdwhn" Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.722603 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dfaefc7-3070-4d23-815d-164d669de123-operator-scripts\") pod \"watcher-db-create-fdwhn\" (UID: \"2dfaefc7-3070-4d23-815d-164d669de123\") " pod="openstack/watcher-db-create-fdwhn" Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.725433 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4kj6\" (UniqueName: \"kubernetes.io/projected/2dfaefc7-3070-4d23-815d-164d669de123-kube-api-access-f4kj6\") pod \"watcher-db-create-fdwhn\" (UID: \"2dfaefc7-3070-4d23-815d-164d669de123\") " pod="openstack/watcher-db-create-fdwhn" Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.770990 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4kj6\" (UniqueName: \"kubernetes.io/projected/2dfaefc7-3070-4d23-815d-164d669de123-kube-api-access-f4kj6\") pod \"watcher-db-create-fdwhn\" (UID: \"2dfaefc7-3070-4d23-815d-164d669de123\") " pod="openstack/watcher-db-create-fdwhn" Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.806111 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-badc-account-create-update-v5rg5"] Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.828079 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nl7jd\" (UniqueName: \"kubernetes.io/projected/1c7d9a80-1086-4991-89b9-d8dffe01eadd-kube-api-access-nl7jd\") pod \"watcher-badc-account-create-update-v5rg5\" (UID: \"1c7d9a80-1086-4991-89b9-d8dffe01eadd\") " pod="openstack/watcher-badc-account-create-update-v5rg5" Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.828215 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c7d9a80-1086-4991-89b9-d8dffe01eadd-operator-scripts\") pod \"watcher-badc-account-create-update-v5rg5\" (UID: \"1c7d9a80-1086-4991-89b9-d8dffe01eadd\") " pod="openstack/watcher-badc-account-create-update-v5rg5" Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.836672 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-create-fdwhn" Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.930767 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nl7jd\" (UniqueName: \"kubernetes.io/projected/1c7d9a80-1086-4991-89b9-d8dffe01eadd-kube-api-access-nl7jd\") pod \"watcher-badc-account-create-update-v5rg5\" (UID: \"1c7d9a80-1086-4991-89b9-d8dffe01eadd\") " pod="openstack/watcher-badc-account-create-update-v5rg5" Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.930931 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c7d9a80-1086-4991-89b9-d8dffe01eadd-operator-scripts\") pod \"watcher-badc-account-create-update-v5rg5\" (UID: \"1c7d9a80-1086-4991-89b9-d8dffe01eadd\") " pod="openstack/watcher-badc-account-create-update-v5rg5" Jan 30 12:17:27 crc kubenswrapper[4703]: I0130 12:17:27.932182 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c7d9a80-1086-4991-89b9-d8dffe01eadd-operator-scripts\") pod \"watcher-badc-account-create-update-v5rg5\" (UID: \"1c7d9a80-1086-4991-89b9-d8dffe01eadd\") " pod="openstack/watcher-badc-account-create-update-v5rg5" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.045959 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nl7jd\" (UniqueName: \"kubernetes.io/projected/1c7d9a80-1086-4991-89b9-d8dffe01eadd-kube-api-access-nl7jd\") pod \"watcher-badc-account-create-update-v5rg5\" (UID: \"1c7d9a80-1086-4991-89b9-d8dffe01eadd\") " pod="openstack/watcher-badc-account-create-update-v5rg5" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.098871 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-9gqcb"] Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.101075 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.108017 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-9gqcb"] Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.247602 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-9gqcb\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") " pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.247708 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qsc9\" (UniqueName: \"kubernetes.io/projected/5f0bf583-356b-4550-9257-ec624b1e5de1-kube-api-access-8qsc9\") pod \"dnsmasq-dns-698758b865-9gqcb\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") " pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.247735 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-dns-svc\") pod \"dnsmasq-dns-698758b865-9gqcb\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") " pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.247796 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-9gqcb\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") " pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.247830 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-config\") pod \"dnsmasq-dns-698758b865-9gqcb\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") " pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.332393 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-badc-account-create-update-v5rg5" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.355658 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-9gqcb\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") " pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.355815 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qsc9\" (UniqueName: \"kubernetes.io/projected/5f0bf583-356b-4550-9257-ec624b1e5de1-kube-api-access-8qsc9\") pod \"dnsmasq-dns-698758b865-9gqcb\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") " pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.355858 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-dns-svc\") pod \"dnsmasq-dns-698758b865-9gqcb\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") " pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.355949 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-9gqcb\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") " pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.355983 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-config\") pod \"dnsmasq-dns-698758b865-9gqcb\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") " pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.378223 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-config\") pod \"dnsmasq-dns-698758b865-9gqcb\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") " pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.378310 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-9gqcb\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") " pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.378731 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-9gqcb\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") " pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.379622 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-dns-svc\") pod \"dnsmasq-dns-698758b865-9gqcb\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") " pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 
12:17:28.436748 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qsc9\" (UniqueName: \"kubernetes.io/projected/5f0bf583-356b-4550-9257-ec624b1e5de1-kube-api-access-8qsc9\") pod \"dnsmasq-dns-698758b865-9gqcb\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") " pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:17:28 crc kubenswrapper[4703]: I0130 12:17:28.498961 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.346839 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.361838 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.372269 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.372597 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.372776 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.375321 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-ph5kk" Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.394635 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.489953 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0" Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.490018 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/6aed7811-b088-403c-bbef-7844c17d52ff-cache\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0" Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.490179 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6aed7811-b088-403c-bbef-7844c17d52ff-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0" Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.490205 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d27lb\" (UniqueName: \"kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-kube-api-access-d27lb\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0" Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.490232 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0" Jan 30 
12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.490256 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/6aed7811-b088-403c-bbef-7844c17d52ff-lock\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0"
Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.592277 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/6aed7811-b088-403c-bbef-7844c17d52ff-lock\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0"
Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.592421 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0"
Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.592451 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/6aed7811-b088-403c-bbef-7844c17d52ff-cache\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0"
Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.592502 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6aed7811-b088-403c-bbef-7844c17d52ff-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0"
Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.592529 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d27lb\" (UniqueName: \"kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-kube-api-access-d27lb\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0"
Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.592552 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0"
Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.593043 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/swift-storage-0"
Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.594706 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/6aed7811-b088-403c-bbef-7844c17d52ff-cache\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0"
Jan 30 12:17:29 crc kubenswrapper[4703]: E0130 12:17:29.594841 4703 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 30 12:17:29 crc kubenswrapper[4703]: E0130 12:17:29.594876 4703 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
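The two projected.go errors above explain the etc-swift mount failures that follow: the projected volume sources the swift-ring-files ConfigMap, which does not exist until the swift-ring-rebalance job (added a few entries below) publishes the rings. The kubelet keeps retrying the mount with a growing delay, visible as durationBeforeRetry 500ms here and 1s on the next attempt. A compact sketch of such a doubling-backoff retry loop; attempt counts and messages are illustrative, not kubelet's actual code:

package main

import (
    "fmt"
    "time"
)

func main() {
    // Pretend the ConfigMap appears on the third attempt, once the
    // ring-rebalance job has written the swift-ring-files ConfigMap.
    const attemptsUntilReady = 3

    backoff := 500 * time.Millisecond // initial durationBeforeRetry in the log
    for attempt := 1; ; attempt++ {
        if attempt >= attemptsUntilReady {
            fmt.Println("MountVolume.SetUp succeeded for volume \"etc-swift\"")
            return
        }
        fmt.Printf("attempt %d failed: configmap %q not found; retrying in %s\n",
            attempt, "swift-ring-files", backoff)
        time.Sleep(backoff)
        backoff *= 2 // double the wait before the next SetUp attempt
    }
}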
"swift-ring-files" not found Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.594839 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/6aed7811-b088-403c-bbef-7844c17d52ff-lock\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0" Jan 30 12:17:29 crc kubenswrapper[4703]: E0130 12:17:29.594937 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift podName:6aed7811-b088-403c-bbef-7844c17d52ff nodeName:}" failed. No retries permitted until 2026-01-30 12:17:30.094912812 +0000 UTC m=+1285.872734466 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift") pod "swift-storage-0" (UID: "6aed7811-b088-403c-bbef-7844c17d52ff") : configmap "swift-ring-files" not found Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.600170 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6aed7811-b088-403c-bbef-7844c17d52ff-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0" Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.621080 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d27lb\" (UniqueName: \"kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-kube-api-access-d27lb\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0" Jan 30 12:17:29 crc kubenswrapper[4703]: I0130 12:17:29.626976 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0" Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.002959 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-v4pc7"] Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.004319 4703 util.go:30] "No sandbox for pod can be found. 
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.013455 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.013535 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.013880 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.041582 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-v4pc7"]
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.103158 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2688299b-7d5f-4fad-9fd9-78de6b83b333-scripts\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.103214 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2688299b-7d5f-4fad-9fd9-78de6b83b333-combined-ca-bundle\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.103246 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2688299b-7d5f-4fad-9fd9-78de6b83b333-etc-swift\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.103660 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2688299b-7d5f-4fad-9fd9-78de6b83b333-dispersionconf\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.103743 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2688299b-7d5f-4fad-9fd9-78de6b83b333-ring-data-devices\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.103834 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.103941 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2688299b-7d5f-4fad-9fd9-78de6b83b333-swiftconf\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: E0130 12:17:30.104023 4703 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 30 12:17:30 crc kubenswrapper[4703]: E0130 12:17:30.104056 4703 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.104033 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zzwcv\" (UniqueName: \"kubernetes.io/projected/2688299b-7d5f-4fad-9fd9-78de6b83b333-kube-api-access-zzwcv\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: E0130 12:17:30.104146 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift podName:6aed7811-b088-403c-bbef-7844c17d52ff nodeName:}" failed. No retries permitted until 2026-01-30 12:17:31.104101394 +0000 UTC m=+1286.881923228 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift") pod "swift-storage-0" (UID: "6aed7811-b088-403c-bbef-7844c17d52ff") : configmap "swift-ring-files" not found
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.206001 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2688299b-7d5f-4fad-9fd9-78de6b83b333-dispersionconf\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.206067 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2688299b-7d5f-4fad-9fd9-78de6b83b333-ring-data-devices\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.206169 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2688299b-7d5f-4fad-9fd9-78de6b83b333-swiftconf\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.206214 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zzwcv\" (UniqueName: \"kubernetes.io/projected/2688299b-7d5f-4fad-9fd9-78de6b83b333-kube-api-access-zzwcv\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.206311 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2688299b-7d5f-4fad-9fd9-78de6b83b333-scripts\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.206353 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2688299b-7d5f-4fad-9fd9-78de6b83b333-combined-ca-bundle\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.206406 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2688299b-7d5f-4fad-9fd9-78de6b83b333-etc-swift\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.207074 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2688299b-7d5f-4fad-9fd9-78de6b83b333-etc-swift\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.207182 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2688299b-7d5f-4fad-9fd9-78de6b83b333-ring-data-devices\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.207381 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2688299b-7d5f-4fad-9fd9-78de6b83b333-scripts\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.215228 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2688299b-7d5f-4fad-9fd9-78de6b83b333-dispersionconf\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.215399 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2688299b-7d5f-4fad-9fd9-78de6b83b333-swiftconf\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.217562 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2688299b-7d5f-4fad-9fd9-78de6b83b333-combined-ca-bundle\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.240376 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zzwcv\" (UniqueName: \"kubernetes.io/projected/2688299b-7d5f-4fad-9fd9-78de6b83b333-kube-api-access-zzwcv\") pod \"swift-ring-rebalance-v4pc7\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:30 crc kubenswrapper[4703]: I0130 12:17:30.333468 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-v4pc7"
Jan 30 12:17:31 crc kubenswrapper[4703]: I0130 12:17:31.127024 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0"
Jan 30 12:17:31 crc kubenswrapper[4703]: E0130 12:17:31.127391 4703 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 30 12:17:31 crc kubenswrapper[4703]: E0130 12:17:31.127409 4703 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 30 12:17:31 crc kubenswrapper[4703]: E0130 12:17:31.127469 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift podName:6aed7811-b088-403c-bbef-7844c17d52ff nodeName:}" failed. No retries permitted until 2026-01-30 12:17:33.127449532 +0000 UTC m=+1288.905271186 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift") pod "swift-storage-0" (UID: "6aed7811-b088-403c-bbef-7844c17d52ff") : configmap "swift-ring-files" not found
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.790085 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-x7jrq"
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.796589 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-6e21-account-create-update-6fr54" event={"ID":"05cf9256-9286-4808-9d1c-9e73e8fa2860","Type":"ContainerDied","Data":"dadc70a1264772a327929ba475ec120c58c00eae4ac7b8ac4b6f9ed3a8fae243"}
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.796654 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dadc70a1264772a327929ba475ec120c58c00eae4ac7b8ac4b6f9ed3a8fae243"
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.802943 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-25gg4"
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.803663 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-25gg4" event={"ID":"0763f436-e964-45c5-92f3-bb4cebeb01a9","Type":"ContainerDied","Data":"86120efe9319036c98de526b6b49fb8253b07b9be5d8d67f8de111562502d76a"}
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.803710 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="86120efe9319036c98de526b6b49fb8253b07b9be5d8d67f8de111562502d76a"
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.823440 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-x7jrq" event={"ID":"5a778462-d253-404e-acde-17f65431a62b","Type":"ContainerDied","Data":"e20dbb47e0ba0172c09627524de58303636d90a08a04ffc024f6ed21caa5113a"}
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.823639 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-6e21-account-create-update-6fr54"
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.823741 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-x7jrq"
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.824226 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e20dbb47e0ba0172c09627524de58303636d90a08a04ffc024f6ed21caa5113a"
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.974335 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05cf9256-9286-4808-9d1c-9e73e8fa2860-operator-scripts\") pod \"05cf9256-9286-4808-9d1c-9e73e8fa2860\" (UID: \"05cf9256-9286-4808-9d1c-9e73e8fa2860\") "
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.974440 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p962j\" (UniqueName: \"kubernetes.io/projected/5a778462-d253-404e-acde-17f65431a62b-kube-api-access-p962j\") pod \"5a778462-d253-404e-acde-17f65431a62b\" (UID: \"5a778462-d253-404e-acde-17f65431a62b\") "
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.974476 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnbwk\" (UniqueName: \"kubernetes.io/projected/0763f436-e964-45c5-92f3-bb4cebeb01a9-kube-api-access-hnbwk\") pod \"0763f436-e964-45c5-92f3-bb4cebeb01a9\" (UID: \"0763f436-e964-45c5-92f3-bb4cebeb01a9\") "
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.974495 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dp2t\" (UniqueName: \"kubernetes.io/projected/05cf9256-9286-4808-9d1c-9e73e8fa2860-kube-api-access-9dp2t\") pod \"05cf9256-9286-4808-9d1c-9e73e8fa2860\" (UID: \"05cf9256-9286-4808-9d1c-9e73e8fa2860\") "
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.974543 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a778462-d253-404e-acde-17f65431a62b-operator-scripts\") pod \"5a778462-d253-404e-acde-17f65431a62b\" (UID: \"5a778462-d253-404e-acde-17f65431a62b\") "
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.974765 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0763f436-e964-45c5-92f3-bb4cebeb01a9-operator-scripts\") pod \"0763f436-e964-45c5-92f3-bb4cebeb01a9\" (UID: \"0763f436-e964-45c5-92f3-bb4cebeb01a9\") "
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.976307 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0763f436-e964-45c5-92f3-bb4cebeb01a9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0763f436-e964-45c5-92f3-bb4cebeb01a9" (UID: "0763f436-e964-45c5-92f3-bb4cebeb01a9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.976875 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/05cf9256-9286-4808-9d1c-9e73e8fa2860-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "05cf9256-9286-4808-9d1c-9e73e8fa2860" (UID: "05cf9256-9286-4808-9d1c-9e73e8fa2860"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.980731 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5a778462-d253-404e-acde-17f65431a62b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5a778462-d253-404e-acde-17f65431a62b" (UID: "5a778462-d253-404e-acde-17f65431a62b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.985704 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05cf9256-9286-4808-9d1c-9e73e8fa2860-kube-api-access-9dp2t" (OuterVolumeSpecName: "kube-api-access-9dp2t") pod "05cf9256-9286-4808-9d1c-9e73e8fa2860" (UID: "05cf9256-9286-4808-9d1c-9e73e8fa2860"). InnerVolumeSpecName "kube-api-access-9dp2t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.985999 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0763f436-e964-45c5-92f3-bb4cebeb01a9-kube-api-access-hnbwk" (OuterVolumeSpecName: "kube-api-access-hnbwk") pod "0763f436-e964-45c5-92f3-bb4cebeb01a9" (UID: "0763f436-e964-45c5-92f3-bb4cebeb01a9"). InnerVolumeSpecName "kube-api-access-hnbwk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:17:32 crc kubenswrapper[4703]: I0130 12:17:32.995415 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a778462-d253-404e-acde-17f65431a62b-kube-api-access-p962j" (OuterVolumeSpecName: "kube-api-access-p962j") pod "5a778462-d253-404e-acde-17f65431a62b" (UID: "5a778462-d253-404e-acde-17f65431a62b"). InnerVolumeSpecName "kube-api-access-p962j". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:17:33 crc kubenswrapper[4703]: I0130 12:17:33.077364 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0763f436-e964-45c5-92f3-bb4cebeb01a9-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:33 crc kubenswrapper[4703]: I0130 12:17:33.077415 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/05cf9256-9286-4808-9d1c-9e73e8fa2860-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:33 crc kubenswrapper[4703]: I0130 12:17:33.077430 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p962j\" (UniqueName: \"kubernetes.io/projected/5a778462-d253-404e-acde-17f65431a62b-kube-api-access-p962j\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:33 crc kubenswrapper[4703]: I0130 12:17:33.077458 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnbwk\" (UniqueName: \"kubernetes.io/projected/0763f436-e964-45c5-92f3-bb4cebeb01a9-kube-api-access-hnbwk\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:33 crc kubenswrapper[4703]: I0130 12:17:33.077471 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dp2t\" (UniqueName: \"kubernetes.io/projected/05cf9256-9286-4808-9d1c-9e73e8fa2860-kube-api-access-9dp2t\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:33 crc kubenswrapper[4703]: I0130 12:17:33.077484 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5a778462-d253-404e-acde-17f65431a62b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:33 crc kubenswrapper[4703]: I0130 12:17:33.180461 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0" Jan 30 12:17:33 crc kubenswrapper[4703]: E0130 12:17:33.180777 4703 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 30 12:17:33 crc kubenswrapper[4703]: E0130 12:17:33.181086 4703 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 30 12:17:33 crc kubenswrapper[4703]: E0130 12:17:33.181199 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift podName:6aed7811-b088-403c-bbef-7844c17d52ff nodeName:}" failed. No retries permitted until 2026-01-30 12:17:37.181166706 +0000 UTC m=+1292.958988360 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift") pod "swift-storage-0" (UID: "6aed7811-b088-403c-bbef-7844c17d52ff") : configmap "swift-ring-files" not found Jan 30 12:17:33 crc kubenswrapper[4703]: I0130 12:17:33.566066 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-ee05-account-create-update-dgl6z"] Jan 30 12:17:33 crc kubenswrapper[4703]: I0130 12:17:33.702060 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-badc-account-create-update-v5rg5"] Jan 30 12:17:33 crc kubenswrapper[4703]: W0130 12:17:33.718908 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2688299b_7d5f_4fad_9fd9_78de6b83b333.slice/crio-4179c3d89116126dce7955df011c342d1c90271782254510d50f740f90bd1eb8 WatchSource:0}: Error finding container 4179c3d89116126dce7955df011c342d1c90271782254510d50f740f90bd1eb8: Status 404 returned error can't find the container with id 4179c3d89116126dce7955df011c342d1c90271782254510d50f740f90bd1eb8 Jan 30 12:17:33 crc kubenswrapper[4703]: I0130 12:17:33.723190 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-v4pc7"] Jan 30 12:17:33 crc kubenswrapper[4703]: I0130 12:17:33.775398 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-l67m7"] Jan 30 12:17:33 crc kubenswrapper[4703]: I0130 12:17:33.852648 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-e48f-account-create-update-mkwjg"] Jan 30 12:17:33 crc kubenswrapper[4703]: I0130 12:17:33.908364 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ee05-account-create-update-dgl6z" event={"ID":"f40c42c3-2575-425d-b80e-845f4a0a88b5","Type":"ContainerStarted","Data":"9e57f4312751e2f148ff66a27558803a04fc1339237b204a7fbcda8cdd7b5048"} Jan 30 12:17:33 crc kubenswrapper[4703]: I0130 12:17:33.961790 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-badc-account-create-update-v5rg5" event={"ID":"1c7d9a80-1086-4991-89b9-d8dffe01eadd","Type":"ContainerStarted","Data":"4e2611cd9d4f89b7f872f0ab0f55952aa3783a8412fbbe8a7fbfb0d8e50c74ba"} Jan 30 12:17:34 crc kubenswrapper[4703]: I0130 12:17:34.004531 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-v4pc7" event={"ID":"2688299b-7d5f-4fad-9fd9-78de6b83b333","Type":"ContainerStarted","Data":"4179c3d89116126dce7955df011c342d1c90271782254510d50f740f90bd1eb8"} Jan 30 12:17:34 crc kubenswrapper[4703]: I0130 12:17:34.067079 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-9gqcb"] Jan 30 12:17:34 crc kubenswrapper[4703]: I0130 12:17:34.076389 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8e865e8b-7723-4aed-b51c-ce7a8da59d13","Type":"ContainerStarted","Data":"91438f56e553120198981dddc1f661e2c9ae1348a4ca7f5c03c2fa8964e87fc2"} Jan 30 12:17:34 crc kubenswrapper[4703]: I0130 12:17:34.084826 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 30 12:17:34 crc kubenswrapper[4703]: I0130 12:17:34.093621 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-l67m7" event={"ID":"1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0","Type":"ContainerStarted","Data":"095935abbcd87128300869f876c16ad502d73456b7083ef1b348c48f5abdd73b"} Jan 
30 12:17:34 crc kubenswrapper[4703]: I0130 12:17:34.100151 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-25gg4" Jan 30 12:17:34 crc kubenswrapper[4703]: I0130 12:17:34.101082 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-e48f-account-create-update-mkwjg" event={"ID":"107ee781-3758-4b91-81d2-f934501de19e","Type":"ContainerStarted","Data":"4d3a7f67e750a445f5351e295841aaea09395efa81955ce5a3a44ff63d577aea"} Jan 30 12:17:34 crc kubenswrapper[4703]: I0130 12:17:34.101177 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-6e21-account-create-update-6fr54" Jan 30 12:17:34 crc kubenswrapper[4703]: I0130 12:17:34.150728 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-create-fdwhn"] Jan 30 12:17:34 crc kubenswrapper[4703]: I0130 12:17:34.165041 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-kttq7"] Jan 30 12:17:34 crc kubenswrapper[4703]: W0130 12:17:34.180454 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2dfaefc7_3070_4d23_815d_164d669de123.slice/crio-adf5617862018886dfeb634bcaafebb620a80a372f47903699b23018a9549a2d WatchSource:0}: Error finding container adf5617862018886dfeb634bcaafebb620a80a372f47903699b23018a9549a2d: Status 404 returned error can't find the container with id adf5617862018886dfeb634bcaafebb620a80a372f47903699b23018a9549a2d Jan 30 12:17:34 crc kubenswrapper[4703]: W0130 12:17:34.194825 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod47c89c69_b5ba_480b_bd12_1aeaaf6cbc01.slice/crio-5edfeca83bf7b8e704a6507200fcd49261eeb7fae1286903415220a7caeebc8b WatchSource:0}: Error finding container 5edfeca83bf7b8e704a6507200fcd49261eeb7fae1286903415220a7caeebc8b: Status 404 returned error can't find the container with id 5edfeca83bf7b8e704a6507200fcd49261eeb7fae1286903415220a7caeebc8b Jan 30 12:17:35 crc kubenswrapper[4703]: I0130 12:17:35.120611 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-kttq7" event={"ID":"47c89c69-b5ba-480b-bd12-1aeaaf6cbc01","Type":"ContainerStarted","Data":"5edfeca83bf7b8e704a6507200fcd49261eeb7fae1286903415220a7caeebc8b"} Jan 30 12:17:35 crc kubenswrapper[4703]: I0130 12:17:35.125068 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea","Type":"ContainerStarted","Data":"eed499cbcd48dc2a258fb27454ed49aee05ea0e38069eb6f774dcae83951c507"} Jan 30 12:17:35 crc kubenswrapper[4703]: I0130 12:17:35.127309 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-9gqcb" event={"ID":"5f0bf583-356b-4550-9257-ec624b1e5de1","Type":"ContainerStarted","Data":"4b2905208fb3c0ca11df0f33521bab89f8a9217d17f8b2b547ac8b2fc0b23fff"} Jan 30 12:17:35 crc kubenswrapper[4703]: I0130 12:17:35.130974 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-fdwhn" event={"ID":"2dfaefc7-3070-4d23-815d-164d669de123","Type":"ContainerStarted","Data":"adf5617862018886dfeb634bcaafebb620a80a372f47903699b23018a9549a2d"} Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.143191 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-e48f-account-create-update-mkwjg" 
event={"ID":"107ee781-3758-4b91-81d2-f934501de19e","Type":"ContainerStarted","Data":"1a4429f6b7be2777b7116dee67e75ced428a4c3dc7eb4c8da82b6d62615129f6"} Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.252163 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-hrpzm"] Jan 30 12:17:36 crc kubenswrapper[4703]: E0130 12:17:36.252770 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0763f436-e964-45c5-92f3-bb4cebeb01a9" containerName="mariadb-database-create" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.252812 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="0763f436-e964-45c5-92f3-bb4cebeb01a9" containerName="mariadb-database-create" Jan 30 12:17:36 crc kubenswrapper[4703]: E0130 12:17:36.252836 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a778462-d253-404e-acde-17f65431a62b" containerName="mariadb-account-create-update" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.252844 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a778462-d253-404e-acde-17f65431a62b" containerName="mariadb-account-create-update" Jan 30 12:17:36 crc kubenswrapper[4703]: E0130 12:17:36.252875 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05cf9256-9286-4808-9d1c-9e73e8fa2860" containerName="mariadb-account-create-update" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.252881 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="05cf9256-9286-4808-9d1c-9e73e8fa2860" containerName="mariadb-account-create-update" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.253176 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="0763f436-e964-45c5-92f3-bb4cebeb01a9" containerName="mariadb-database-create" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.253204 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="05cf9256-9286-4808-9d1c-9e73e8fa2860" containerName="mariadb-account-create-update" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.253227 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a778462-d253-404e-acde-17f65431a62b" containerName="mariadb-account-create-update" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.254225 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-hrpzm" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.256839 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.257530 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-5gn24" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.276311 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-hrpzm"] Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.369464 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3da2d370-06f5-4fcc-b58e-2676657e6e85-db-sync-config-data\") pod \"glance-db-sync-hrpzm\" (UID: \"3da2d370-06f5-4fcc-b58e-2676657e6e85\") " pod="openstack/glance-db-sync-hrpzm" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.369597 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6tfg\" (UniqueName: \"kubernetes.io/projected/3da2d370-06f5-4fcc-b58e-2676657e6e85-kube-api-access-p6tfg\") pod \"glance-db-sync-hrpzm\" (UID: \"3da2d370-06f5-4fcc-b58e-2676657e6e85\") " pod="openstack/glance-db-sync-hrpzm" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.369697 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3da2d370-06f5-4fcc-b58e-2676657e6e85-config-data\") pod \"glance-db-sync-hrpzm\" (UID: \"3da2d370-06f5-4fcc-b58e-2676657e6e85\") " pod="openstack/glance-db-sync-hrpzm" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.369986 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3da2d370-06f5-4fcc-b58e-2676657e6e85-combined-ca-bundle\") pod \"glance-db-sync-hrpzm\" (UID: \"3da2d370-06f5-4fcc-b58e-2676657e6e85\") " pod="openstack/glance-db-sync-hrpzm" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.472323 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6tfg\" (UniqueName: \"kubernetes.io/projected/3da2d370-06f5-4fcc-b58e-2676657e6e85-kube-api-access-p6tfg\") pod \"glance-db-sync-hrpzm\" (UID: \"3da2d370-06f5-4fcc-b58e-2676657e6e85\") " pod="openstack/glance-db-sync-hrpzm" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.472475 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3da2d370-06f5-4fcc-b58e-2676657e6e85-config-data\") pod \"glance-db-sync-hrpzm\" (UID: \"3da2d370-06f5-4fcc-b58e-2676657e6e85\") " pod="openstack/glance-db-sync-hrpzm" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.472598 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3da2d370-06f5-4fcc-b58e-2676657e6e85-combined-ca-bundle\") pod \"glance-db-sync-hrpzm\" (UID: \"3da2d370-06f5-4fcc-b58e-2676657e6e85\") " pod="openstack/glance-db-sync-hrpzm" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.473776 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3da2d370-06f5-4fcc-b58e-2676657e6e85-db-sync-config-data\") pod 
\"glance-db-sync-hrpzm\" (UID: \"3da2d370-06f5-4fcc-b58e-2676657e6e85\") " pod="openstack/glance-db-sync-hrpzm" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.481008 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3da2d370-06f5-4fcc-b58e-2676657e6e85-db-sync-config-data\") pod \"glance-db-sync-hrpzm\" (UID: \"3da2d370-06f5-4fcc-b58e-2676657e6e85\") " pod="openstack/glance-db-sync-hrpzm" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.481260 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3da2d370-06f5-4fcc-b58e-2676657e6e85-config-data\") pod \"glance-db-sync-hrpzm\" (UID: \"3da2d370-06f5-4fcc-b58e-2676657e6e85\") " pod="openstack/glance-db-sync-hrpzm" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.494285 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3da2d370-06f5-4fcc-b58e-2676657e6e85-combined-ca-bundle\") pod \"glance-db-sync-hrpzm\" (UID: \"3da2d370-06f5-4fcc-b58e-2676657e6e85\") " pod="openstack/glance-db-sync-hrpzm" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.494767 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6tfg\" (UniqueName: \"kubernetes.io/projected/3da2d370-06f5-4fcc-b58e-2676657e6e85-kube-api-access-p6tfg\") pod \"glance-db-sync-hrpzm\" (UID: \"3da2d370-06f5-4fcc-b58e-2676657e6e85\") " pod="openstack/glance-db-sync-hrpzm" Jan 30 12:17:36 crc kubenswrapper[4703]: I0130 12:17:36.579953 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-hrpzm" Jan 30 12:17:37 crc kubenswrapper[4703]: I0130 12:17:37.193645 4703 generic.go:334] "Generic (PLEG): container finished" podID="5f0bf583-356b-4550-9257-ec624b1e5de1" containerID="3aace889b773977420b0ed6985271772bde7953e878840124cf13614e3ec5f49" exitCode=0 Jan 30 12:17:37 crc kubenswrapper[4703]: I0130 12:17:37.193799 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0" Jan 30 12:17:37 crc kubenswrapper[4703]: E0130 12:17:37.193993 4703 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 30 12:17:37 crc kubenswrapper[4703]: I0130 12:17:37.195500 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-9gqcb" event={"ID":"5f0bf583-356b-4550-9257-ec624b1e5de1","Type":"ContainerDied","Data":"3aace889b773977420b0ed6985271772bde7953e878840124cf13614e3ec5f49"} Jan 30 12:17:37 crc kubenswrapper[4703]: E0130 12:17:37.195758 4703 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 30 12:17:37 crc kubenswrapper[4703]: E0130 12:17:37.195881 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift podName:6aed7811-b088-403c-bbef-7844c17d52ff nodeName:}" failed. No retries permitted until 2026-01-30 12:17:45.195847878 +0000 UTC m=+1300.973669672 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift") pod "swift-storage-0" (UID: "6aed7811-b088-403c-bbef-7844c17d52ff") : configmap "swift-ring-files" not found Jan 30 12:17:37 crc kubenswrapper[4703]: I0130 12:17:37.212760 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8e865e8b-7723-4aed-b51c-ce7a8da59d13","Type":"ContainerStarted","Data":"a7b99b52d4717eddd1653ce555492f68bc313f64849651afe1894b2bdbafc0b0"} Jan 30 12:17:37 crc kubenswrapper[4703]: I0130 12:17:37.225020 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-fdwhn" event={"ID":"2dfaefc7-3070-4d23-815d-164d669de123","Type":"ContainerStarted","Data":"80458f4316e79ae733c570a0ce5f80d7a0287a90166a42b27ab0859c125c2319"} Jan 30 12:17:37 crc kubenswrapper[4703]: I0130 12:17:37.230474 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-l67m7" event={"ID":"1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0","Type":"ContainerStarted","Data":"47ffa0e122efb607947d6a1571555ddc9c2f8decb42759a24114ff54bb6e6078"} Jan 30 12:17:37 crc kubenswrapper[4703]: I0130 12:17:37.233731 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ee05-account-create-update-dgl6z" event={"ID":"f40c42c3-2575-425d-b80e-845f4a0a88b5","Type":"ContainerStarted","Data":"4582e0be712fec21723c0463761917ac26a0bebeb1bbba9c315c0c879e55974e"} Jan 30 12:17:37 crc kubenswrapper[4703]: I0130 12:17:37.235936 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-kttq7" event={"ID":"47c89c69-b5ba-480b-bd12-1aeaaf6cbc01","Type":"ContainerStarted","Data":"3d48100bb4aa8798349c226475ed45d9377a003afcdeea8e8ea83148f015720a"} Jan 30 12:17:37 crc kubenswrapper[4703]: I0130 12:17:37.239201 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-badc-account-create-update-v5rg5" event={"ID":"1c7d9a80-1086-4991-89b9-d8dffe01eadd","Type":"ContainerStarted","Data":"87dea74478a4a8b66c31847598edf50382388fac013fe6a4a51054c3e092c2a4"} Jan 30 12:17:37 crc kubenswrapper[4703]: I0130 12:17:37.263848 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-db-create-fdwhn" podStartSLOduration=10.263801736 podStartE2EDuration="10.263801736s" podCreationTimestamp="2026-01-30 12:17:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:17:37.257647731 +0000 UTC m=+1293.035469395" watchObservedRunningTime="2026-01-30 12:17:37.263801736 +0000 UTC m=+1293.041623390" Jan 30 12:17:37 crc kubenswrapper[4703]: I0130 12:17:37.294861 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-hrpzm"] Jan 30 12:17:37 crc kubenswrapper[4703]: I0130 12:17:37.299701 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-kttq7" podStartSLOduration=12.299669331 podStartE2EDuration="12.299669331s" podCreationTimestamp="2026-01-30 12:17:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:17:37.289722434 +0000 UTC m=+1293.067544098" watchObservedRunningTime="2026-01-30 12:17:37.299669331 +0000 UTC m=+1293.077490975" Jan 30 12:17:37 crc kubenswrapper[4703]: I0130 12:17:37.312856 4703 pod_startup_latency_tracker.go:104] 
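The "Observed pod startup duration" entries are the kubelet's startup-latency tracker: observedRunningTime minus podCreationTimestamp, with the zero firstStartedPulling/lastFinishedPulling timestamps indicating no image pull was counted, which is why podStartSLOduration equals podStartE2EDuration here. A sketch reproducing the watcher-db-create-fdwhn figure from the two timestamps in the entry above (Go's time.Parse accepts the fractional seconds even though the layout omits them):

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	const layout = "2006-01-02 15:04:05 -0700 MST"
    	// Timestamps copied from the log entry above.
    	created, err := time.Parse(layout, "2026-01-30 12:17:27 +0000 UTC")
    	if err != nil {
    		panic(err)
    	}
    	running, err := time.Parse(layout, "2026-01-30 12:17:37.263801736 +0000 UTC")
    	if err != nil {
    		panic(err)
    	}
    	// Prints 10.263801736, matching podStartE2EDuration in the entry.
    	fmt.Printf("startup duration: %.9fs\n", running.Sub(created).Seconds())
    }
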
"Observed pod startup duration" pod="openstack/watcher-badc-account-create-update-v5rg5" podStartSLOduration=10.312839325 podStartE2EDuration="10.312839325s" podCreationTimestamp="2026-01-30 12:17:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:17:37.31115643 +0000 UTC m=+1293.088978114" watchObservedRunningTime="2026-01-30 12:17:37.312839325 +0000 UTC m=+1293.090660979" Jan 30 12:17:37 crc kubenswrapper[4703]: W0130 12:17:37.371647 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3da2d370_06f5_4fcc_b58e_2676657e6e85.slice/crio-5550da3e1d2d27d9cc47f2560f39079093d4b5a2b126aebbb16dddd4de98a4ef WatchSource:0}: Error finding container 5550da3e1d2d27d9cc47f2560f39079093d4b5a2b126aebbb16dddd4de98a4ef: Status 404 returned error can't find the container with id 5550da3e1d2d27d9cc47f2560f39079093d4b5a2b126aebbb16dddd4de98a4ef Jan 30 12:17:37 crc kubenswrapper[4703]: I0130 12:17:37.399116 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-e48f-account-create-update-mkwjg" podStartSLOduration=12.399091247 podStartE2EDuration="12.399091247s" podCreationTimestamp="2026-01-30 12:17:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:17:37.397052682 +0000 UTC m=+1293.174874326" watchObservedRunningTime="2026-01-30 12:17:37.399091247 +0000 UTC m=+1293.176913011" Jan 30 12:17:37 crc kubenswrapper[4703]: I0130 12:17:37.411480 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-ee05-account-create-update-dgl6z" podStartSLOduration=12.41144806 podStartE2EDuration="12.41144806s" podCreationTimestamp="2026-01-30 12:17:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:17:37.357417465 +0000 UTC m=+1293.135239119" watchObservedRunningTime="2026-01-30 12:17:37.41144806 +0000 UTC m=+1293.189269714" Jan 30 12:17:37 crc kubenswrapper[4703]: I0130 12:17:37.448477 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-l67m7" podStartSLOduration=12.448444425 podStartE2EDuration="12.448444425s" podCreationTimestamp="2026-01-30 12:17:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:17:37.428025496 +0000 UTC m=+1293.205847160" watchObservedRunningTime="2026-01-30 12:17:37.448444425 +0000 UTC m=+1293.226266079" Jan 30 12:17:38 crc kubenswrapper[4703]: I0130 12:17:38.250913 4703 generic.go:334] "Generic (PLEG): container finished" podID="2dfaefc7-3070-4d23-815d-164d669de123" containerID="80458f4316e79ae733c570a0ce5f80d7a0287a90166a42b27ab0859c125c2319" exitCode=0 Jan 30 12:17:38 crc kubenswrapper[4703]: I0130 12:17:38.251025 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-fdwhn" event={"ID":"2dfaefc7-3070-4d23-815d-164d669de123","Type":"ContainerDied","Data":"80458f4316e79ae733c570a0ce5f80d7a0287a90166a42b27ab0859c125c2319"} Jan 30 12:17:38 crc kubenswrapper[4703]: I0130 12:17:38.253738 4703 generic.go:334] "Generic (PLEG): container finished" podID="1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0" 
containerID="47ffa0e122efb607947d6a1571555ddc9c2f8decb42759a24114ff54bb6e6078" exitCode=0 Jan 30 12:17:38 crc kubenswrapper[4703]: I0130 12:17:38.253803 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-l67m7" event={"ID":"1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0","Type":"ContainerDied","Data":"47ffa0e122efb607947d6a1571555ddc9c2f8decb42759a24114ff54bb6e6078"} Jan 30 12:17:38 crc kubenswrapper[4703]: I0130 12:17:38.256430 4703 generic.go:334] "Generic (PLEG): container finished" podID="47c89c69-b5ba-480b-bd12-1aeaaf6cbc01" containerID="3d48100bb4aa8798349c226475ed45d9377a003afcdeea8e8ea83148f015720a" exitCode=0 Jan 30 12:17:38 crc kubenswrapper[4703]: I0130 12:17:38.256578 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-kttq7" event={"ID":"47c89c69-b5ba-480b-bd12-1aeaaf6cbc01","Type":"ContainerDied","Data":"3d48100bb4aa8798349c226475ed45d9377a003afcdeea8e8ea83148f015720a"} Jan 30 12:17:38 crc kubenswrapper[4703]: I0130 12:17:38.261704 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-hrpzm" event={"ID":"3da2d370-06f5-4fcc-b58e-2676657e6e85","Type":"ContainerStarted","Data":"5550da3e1d2d27d9cc47f2560f39079093d4b5a2b126aebbb16dddd4de98a4ef"} Jan 30 12:17:38 crc kubenswrapper[4703]: I0130 12:17:38.270313 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-9gqcb" event={"ID":"5f0bf583-356b-4550-9257-ec624b1e5de1","Type":"ContainerStarted","Data":"8b75261f5087658d6065c79dbc80a36cab75a7635d5a2e56e959769b10d384e2"} Jan 30 12:17:38 crc kubenswrapper[4703]: I0130 12:17:38.270378 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:17:38 crc kubenswrapper[4703]: I0130 12:17:38.308760 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-9gqcb" podStartSLOduration=11.308729895 podStartE2EDuration="11.308729895s" podCreationTimestamp="2026-01-30 12:17:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:17:38.308273292 +0000 UTC m=+1294.086094966" watchObservedRunningTime="2026-01-30 12:17:38.308729895 +0000 UTC m=+1294.086551549" Jan 30 12:17:38 crc kubenswrapper[4703]: I0130 12:17:38.556826 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-x7jrq"] Jan 30 12:17:38 crc kubenswrapper[4703]: I0130 12:17:38.564600 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-x7jrq"] Jan 30 12:17:38 crc kubenswrapper[4703]: E0130 12:17:38.629395 4703 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod107ee781_3758_4b91_81d2_f934501de19e.slice/crio-1a4429f6b7be2777b7116dee67e75ced428a4c3dc7eb4c8da82b6d62615129f6.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf40c42c3_2575_425d_b80e_845f4a0a88b5.slice/crio-4582e0be712fec21723c0463761917ac26a0bebeb1bbba9c315c0c879e55974e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod107ee781_3758_4b91_81d2_f934501de19e.slice/crio-conmon-1a4429f6b7be2777b7116dee67e75ced428a4c3dc7eb4c8da82b6d62615129f6.scope\": RecentStats: unable to find 
data in memory cache]" Jan 30 12:17:39 crc kubenswrapper[4703]: I0130 12:17:39.111334 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a778462-d253-404e-acde-17f65431a62b" path="/var/lib/kubelet/pods/5a778462-d253-404e-acde-17f65431a62b/volumes" Jan 30 12:17:39 crc kubenswrapper[4703]: I0130 12:17:39.290416 4703 generic.go:334] "Generic (PLEG): container finished" podID="107ee781-3758-4b91-81d2-f934501de19e" containerID="1a4429f6b7be2777b7116dee67e75ced428a4c3dc7eb4c8da82b6d62615129f6" exitCode=0 Jan 30 12:17:39 crc kubenswrapper[4703]: I0130 12:17:39.290503 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-e48f-account-create-update-mkwjg" event={"ID":"107ee781-3758-4b91-81d2-f934501de19e","Type":"ContainerDied","Data":"1a4429f6b7be2777b7116dee67e75ced428a4c3dc7eb4c8da82b6d62615129f6"} Jan 30 12:17:39 crc kubenswrapper[4703]: I0130 12:17:39.297464 4703 generic.go:334] "Generic (PLEG): container finished" podID="f40c42c3-2575-425d-b80e-845f4a0a88b5" containerID="4582e0be712fec21723c0463761917ac26a0bebeb1bbba9c315c0c879e55974e" exitCode=0 Jan 30 12:17:39 crc kubenswrapper[4703]: I0130 12:17:39.297557 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ee05-account-create-update-dgl6z" event={"ID":"f40c42c3-2575-425d-b80e-845f4a0a88b5","Type":"ContainerDied","Data":"4582e0be712fec21723c0463761917ac26a0bebeb1bbba9c315c0c879e55974e"} Jan 30 12:17:39 crc kubenswrapper[4703]: I0130 12:17:39.300909 4703 generic.go:334] "Generic (PLEG): container finished" podID="1c7d9a80-1086-4991-89b9-d8dffe01eadd" containerID="87dea74478a4a8b66c31847598edf50382388fac013fe6a4a51054c3e092c2a4" exitCode=0 Jan 30 12:17:39 crc kubenswrapper[4703]: I0130 12:17:39.301257 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-badc-account-create-update-v5rg5" event={"ID":"1c7d9a80-1086-4991-89b9-d8dffe01eadd","Type":"ContainerDied","Data":"87dea74478a4a8b66c31847598edf50382388fac013fe6a4a51054c3e092c2a4"} Jan 30 12:17:40 crc kubenswrapper[4703]: I0130 12:17:40.315620 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-create-fdwhn" event={"ID":"2dfaefc7-3070-4d23-815d-164d669de123","Type":"ContainerDied","Data":"adf5617862018886dfeb634bcaafebb620a80a372f47903699b23018a9549a2d"} Jan 30 12:17:40 crc kubenswrapper[4703]: I0130 12:17:40.316286 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="adf5617862018886dfeb634bcaafebb620a80a372f47903699b23018a9549a2d" Jan 30 12:17:40 crc kubenswrapper[4703]: I0130 12:17:40.404670 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-create-fdwhn" Jan 30 12:17:40 crc kubenswrapper[4703]: I0130 12:17:40.510145 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4kj6\" (UniqueName: \"kubernetes.io/projected/2dfaefc7-3070-4d23-815d-164d669de123-kube-api-access-f4kj6\") pod \"2dfaefc7-3070-4d23-815d-164d669de123\" (UID: \"2dfaefc7-3070-4d23-815d-164d669de123\") " Jan 30 12:17:40 crc kubenswrapper[4703]: I0130 12:17:40.510207 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dfaefc7-3070-4d23-815d-164d669de123-operator-scripts\") pod \"2dfaefc7-3070-4d23-815d-164d669de123\" (UID: \"2dfaefc7-3070-4d23-815d-164d669de123\") " Jan 30 12:17:40 crc kubenswrapper[4703]: I0130 12:17:40.511903 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2dfaefc7-3070-4d23-815d-164d669de123-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2dfaefc7-3070-4d23-815d-164d669de123" (UID: "2dfaefc7-3070-4d23-815d-164d669de123"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:40 crc kubenswrapper[4703]: I0130 12:17:40.552579 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2dfaefc7-3070-4d23-815d-164d669de123-kube-api-access-f4kj6" (OuterVolumeSpecName: "kube-api-access-f4kj6") pod "2dfaefc7-3070-4d23-815d-164d669de123" (UID: "2dfaefc7-3070-4d23-815d-164d669de123"). InnerVolumeSpecName "kube-api-access-f4kj6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:17:40 crc kubenswrapper[4703]: I0130 12:17:40.613858 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dfaefc7-3070-4d23-815d-164d669de123-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:40 crc kubenswrapper[4703]: I0130 12:17:40.613931 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4kj6\" (UniqueName: \"kubernetes.io/projected/2dfaefc7-3070-4d23-815d-164d669de123-kube-api-access-f4kj6\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:40 crc kubenswrapper[4703]: I0130 12:17:40.721617 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-tg9tq" Jan 30 12:17:41 crc kubenswrapper[4703]: I0130 12:17:41.325868 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-create-fdwhn" Jan 30 12:17:42 crc kubenswrapper[4703]: I0130 12:17:42.351096 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-badc-account-create-update-v5rg5" event={"ID":"1c7d9a80-1086-4991-89b9-d8dffe01eadd","Type":"ContainerDied","Data":"4e2611cd9d4f89b7f872f0ab0f55952aa3783a8412fbbe8a7fbfb0d8e50c74ba"} Jan 30 12:17:42 crc kubenswrapper[4703]: I0130 12:17:42.351231 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e2611cd9d4f89b7f872f0ab0f55952aa3783a8412fbbe8a7fbfb0d8e50c74ba" Jan 30 12:17:42 crc kubenswrapper[4703]: I0130 12:17:42.352924 4703 generic.go:334] "Generic (PLEG): container finished" podID="aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" containerID="fa64bfe6d18e3452d8e0ab152d7a52f728f181e2221def319dc3e38ef1880ae1" exitCode=0 Jan 30 12:17:42 crc kubenswrapper[4703]: I0130 12:17:42.352989 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b","Type":"ContainerDied","Data":"fa64bfe6d18e3452d8e0ab152d7a52f728f181e2221def319dc3e38ef1880ae1"} Jan 30 12:17:42 crc kubenswrapper[4703]: I0130 12:17:42.439537 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-badc-account-create-update-v5rg5" Jan 30 12:17:42 crc kubenswrapper[4703]: I0130 12:17:42.590713 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nl7jd\" (UniqueName: \"kubernetes.io/projected/1c7d9a80-1086-4991-89b9-d8dffe01eadd-kube-api-access-nl7jd\") pod \"1c7d9a80-1086-4991-89b9-d8dffe01eadd\" (UID: \"1c7d9a80-1086-4991-89b9-d8dffe01eadd\") " Jan 30 12:17:42 crc kubenswrapper[4703]: I0130 12:17:42.590797 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c7d9a80-1086-4991-89b9-d8dffe01eadd-operator-scripts\") pod \"1c7d9a80-1086-4991-89b9-d8dffe01eadd\" (UID: \"1c7d9a80-1086-4991-89b9-d8dffe01eadd\") " Jan 30 12:17:42 crc kubenswrapper[4703]: I0130 12:17:42.591339 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c7d9a80-1086-4991-89b9-d8dffe01eadd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1c7d9a80-1086-4991-89b9-d8dffe01eadd" (UID: "1c7d9a80-1086-4991-89b9-d8dffe01eadd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:42 crc kubenswrapper[4703]: I0130 12:17:42.591577 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c7d9a80-1086-4991-89b9-d8dffe01eadd-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:42 crc kubenswrapper[4703]: I0130 12:17:42.602440 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c7d9a80-1086-4991-89b9-d8dffe01eadd-kube-api-access-nl7jd" (OuterVolumeSpecName: "kube-api-access-nl7jd") pod "1c7d9a80-1086-4991-89b9-d8dffe01eadd" (UID: "1c7d9a80-1086-4991-89b9-d8dffe01eadd"). InnerVolumeSpecName "kube-api-access-nl7jd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:17:42 crc kubenswrapper[4703]: I0130 12:17:42.693992 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nl7jd\" (UniqueName: \"kubernetes.io/projected/1c7d9a80-1086-4991-89b9-d8dffe01eadd-kube-api-access-nl7jd\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:42 crc kubenswrapper[4703]: I0130 12:17:42.823146 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:17:42 crc kubenswrapper[4703]: I0130 12:17:42.823235 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:17:42 crc kubenswrapper[4703]: I0130 12:17:42.823300 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 12:17:42 crc kubenswrapper[4703]: I0130 12:17:42.824272 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cc0f01995e25e263737b7a37c11076b9211642f05e9b4b225e1ac40c3094db02"} pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 12:17:42 crc kubenswrapper[4703]: I0130 12:17:42.824353 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" containerID="cri-o://cc0f01995e25e263737b7a37c11076b9211642f05e9b4b225e1ac40c3094db02" gracePeriod=600 Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.372561 4703 generic.go:334] "Generic (PLEG): container finished" podID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerID="cc0f01995e25e263737b7a37c11076b9211642f05e9b4b225e1ac40c3094db02" exitCode=0 Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.372681 4703 util.go:48] "No ready sandbox for pod can be found. 
Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.373749 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerDied","Data":"cc0f01995e25e263737b7a37c11076b9211642f05e9b4b225e1ac40c3094db02"}
Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.373829 4703 scope.go:117] "RemoveContainer" containerID="cc192085768faef3bd05075caea9f2c24722e52bca08578d68a4a914317757f0"
Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.502346 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-9gqcb"
Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.591391 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-q9bcn"]
Jan 30 12:17:43 crc kubenswrapper[4703]: E0130 12:17:43.591973 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dfaefc7-3070-4d23-815d-164d669de123" containerName="mariadb-database-create"
Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.591994 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dfaefc7-3070-4d23-815d-164d669de123" containerName="mariadb-database-create"
Jan 30 12:17:43 crc kubenswrapper[4703]: E0130 12:17:43.592018 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c7d9a80-1086-4991-89b9-d8dffe01eadd" containerName="mariadb-account-create-update"
Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.592026 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c7d9a80-1086-4991-89b9-d8dffe01eadd" containerName="mariadb-account-create-update"
Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.592419 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2dfaefc7-3070-4d23-815d-164d669de123" containerName="mariadb-database-create"
Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.592450 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c7d9a80-1086-4991-89b9-d8dffe01eadd" containerName="mariadb-account-create-update"
Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.593417 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-q9bcn" Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.596659 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.605565 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-q9bcn"] Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.624095 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-x8lvt"] Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.625163 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" podUID="5fb4520e-7665-4d6b-8238-e3e3cf2a1306" containerName="dnsmasq-dns" containerID="cri-o://26b5b238812b8ade1655ca3ca8903878ea93de755f5814f3bb09bdd719cae71a" gracePeriod=10 Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.731401 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af9e761d-5f56-419b-8d45-0cadf63e675a-operator-scripts\") pod \"root-account-create-update-q9bcn\" (UID: \"af9e761d-5f56-419b-8d45-0cadf63e675a\") " pod="openstack/root-account-create-update-q9bcn" Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.731478 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvhnt\" (UniqueName: \"kubernetes.io/projected/af9e761d-5f56-419b-8d45-0cadf63e675a-kube-api-access-bvhnt\") pod \"root-account-create-update-q9bcn\" (UID: \"af9e761d-5f56-419b-8d45-0cadf63e675a\") " pod="openstack/root-account-create-update-q9bcn" Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.833189 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af9e761d-5f56-419b-8d45-0cadf63e675a-operator-scripts\") pod \"root-account-create-update-q9bcn\" (UID: \"af9e761d-5f56-419b-8d45-0cadf63e675a\") " pod="openstack/root-account-create-update-q9bcn" Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.833730 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvhnt\" (UniqueName: \"kubernetes.io/projected/af9e761d-5f56-419b-8d45-0cadf63e675a-kube-api-access-bvhnt\") pod \"root-account-create-update-q9bcn\" (UID: \"af9e761d-5f56-419b-8d45-0cadf63e675a\") " pod="openstack/root-account-create-update-q9bcn" Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.834113 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af9e761d-5f56-419b-8d45-0cadf63e675a-operator-scripts\") pod \"root-account-create-update-q9bcn\" (UID: \"af9e761d-5f56-419b-8d45-0cadf63e675a\") " pod="openstack/root-account-create-update-q9bcn" Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.857554 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvhnt\" (UniqueName: \"kubernetes.io/projected/af9e761d-5f56-419b-8d45-0cadf63e675a-kube-api-access-bvhnt\") pod \"root-account-create-update-q9bcn\" (UID: \"af9e761d-5f56-419b-8d45-0cadf63e675a\") " pod="openstack/root-account-create-update-q9bcn" Jan 30 12:17:43 crc kubenswrapper[4703]: I0130 12:17:43.923179 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-q9bcn" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.076379 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-ee05-account-create-update-dgl6z" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.104478 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-l67m7" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.155874 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-e48f-account-create-update-mkwjg" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.160524 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-kttq7" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.247234 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0-operator-scripts\") pod \"1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0\" (UID: \"1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0\") " Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.247924 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zrtc\" (UniqueName: \"kubernetes.io/projected/47c89c69-b5ba-480b-bd12-1aeaaf6cbc01-kube-api-access-7zrtc\") pod \"47c89c69-b5ba-480b-bd12-1aeaaf6cbc01\" (UID: \"47c89c69-b5ba-480b-bd12-1aeaaf6cbc01\") " Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.248011 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47c89c69-b5ba-480b-bd12-1aeaaf6cbc01-operator-scripts\") pod \"47c89c69-b5ba-480b-bd12-1aeaaf6cbc01\" (UID: \"47c89c69-b5ba-480b-bd12-1aeaaf6cbc01\") " Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.248072 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7jws2\" (UniqueName: \"kubernetes.io/projected/107ee781-3758-4b91-81d2-f934501de19e-kube-api-access-7jws2\") pod \"107ee781-3758-4b91-81d2-f934501de19e\" (UID: \"107ee781-3758-4b91-81d2-f934501de19e\") " Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.248171 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/107ee781-3758-4b91-81d2-f934501de19e-operator-scripts\") pod \"107ee781-3758-4b91-81d2-f934501de19e\" (UID: \"107ee781-3758-4b91-81d2-f934501de19e\") " Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.248211 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f40c42c3-2575-425d-b80e-845f4a0a88b5-operator-scripts\") pod \"f40c42c3-2575-425d-b80e-845f4a0a88b5\" (UID: \"f40c42c3-2575-425d-b80e-845f4a0a88b5\") " Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.248258 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rhzf4\" (UniqueName: \"kubernetes.io/projected/1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0-kube-api-access-rhzf4\") pod \"1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0\" (UID: \"1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0\") " Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.248456 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-pxh2d\" (UniqueName: \"kubernetes.io/projected/f40c42c3-2575-425d-b80e-845f4a0a88b5-kube-api-access-pxh2d\") pod \"f40c42c3-2575-425d-b80e-845f4a0a88b5\" (UID: \"f40c42c3-2575-425d-b80e-845f4a0a88b5\") " Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.251647 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f40c42c3-2575-425d-b80e-845f4a0a88b5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f40c42c3-2575-425d-b80e-845f4a0a88b5" (UID: "f40c42c3-2575-425d-b80e-845f4a0a88b5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.258055 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0" (UID: "1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.262919 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/107ee781-3758-4b91-81d2-f934501de19e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "107ee781-3758-4b91-81d2-f934501de19e" (UID: "107ee781-3758-4b91-81d2-f934501de19e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.270410 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0-kube-api-access-rhzf4" (OuterVolumeSpecName: "kube-api-access-rhzf4") pod "1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0" (UID: "1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0"). InnerVolumeSpecName "kube-api-access-rhzf4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.277782 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47c89c69-b5ba-480b-bd12-1aeaaf6cbc01-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "47c89c69-b5ba-480b-bd12-1aeaaf6cbc01" (UID: "47c89c69-b5ba-480b-bd12-1aeaaf6cbc01"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.297914 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/107ee781-3758-4b91-81d2-f934501de19e-kube-api-access-7jws2" (OuterVolumeSpecName: "kube-api-access-7jws2") pod "107ee781-3758-4b91-81d2-f934501de19e" (UID: "107ee781-3758-4b91-81d2-f934501de19e"). InnerVolumeSpecName "kube-api-access-7jws2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.315531 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47c89c69-b5ba-480b-bd12-1aeaaf6cbc01-kube-api-access-7zrtc" (OuterVolumeSpecName: "kube-api-access-7zrtc") pod "47c89c69-b5ba-480b-bd12-1aeaaf6cbc01" (UID: "47c89c69-b5ba-480b-bd12-1aeaaf6cbc01"). InnerVolumeSpecName "kube-api-access-7zrtc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.315650 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f40c42c3-2575-425d-b80e-845f4a0a88b5-kube-api-access-pxh2d" (OuterVolumeSpecName: "kube-api-access-pxh2d") pod "f40c42c3-2575-425d-b80e-845f4a0a88b5" (UID: "f40c42c3-2575-425d-b80e-845f4a0a88b5"). InnerVolumeSpecName "kube-api-access-pxh2d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.377720 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pxh2d\" (UniqueName: \"kubernetes.io/projected/f40c42c3-2575-425d-b80e-845f4a0a88b5-kube-api-access-pxh2d\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.378387 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.378403 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zrtc\" (UniqueName: \"kubernetes.io/projected/47c89c69-b5ba-480b-bd12-1aeaaf6cbc01-kube-api-access-7zrtc\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.378416 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47c89c69-b5ba-480b-bd12-1aeaaf6cbc01-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.378437 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7jws2\" (UniqueName: \"kubernetes.io/projected/107ee781-3758-4b91-81d2-f934501de19e-kube-api-access-7jws2\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.378454 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/107ee781-3758-4b91-81d2-f934501de19e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.378466 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f40c42c3-2575-425d-b80e-845f4a0a88b5-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.378479 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rhzf4\" (UniqueName: \"kubernetes.io/projected/1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0-kube-api-access-rhzf4\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.461218 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-kttq7" event={"ID":"47c89c69-b5ba-480b-bd12-1aeaaf6cbc01","Type":"ContainerDied","Data":"5edfeca83bf7b8e704a6507200fcd49261eeb7fae1286903415220a7caeebc8b"} Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.461293 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5edfeca83bf7b8e704a6507200fcd49261eeb7fae1286903415220a7caeebc8b" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.461394 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-kttq7" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.477079 4703 generic.go:334] "Generic (PLEG): container finished" podID="5fb4520e-7665-4d6b-8238-e3e3cf2a1306" containerID="26b5b238812b8ade1655ca3ca8903878ea93de755f5814f3bb09bdd719cae71a" exitCode=0 Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.477204 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" event={"ID":"5fb4520e-7665-4d6b-8238-e3e3cf2a1306","Type":"ContainerDied","Data":"26b5b238812b8ade1655ca3ca8903878ea93de755f5814f3bb09bdd719cae71a"} Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.526234 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b","Type":"ContainerStarted","Data":"1336b0d45f54ef019a4efeeffadf12d12099f883fab274d1069b9b8e1746c41d"} Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.528219 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.537471 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-l67m7" event={"ID":"1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0","Type":"ContainerDied","Data":"095935abbcd87128300869f876c16ad502d73456b7083ef1b348c48f5abdd73b"} Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.537551 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="095935abbcd87128300869f876c16ad502d73456b7083ef1b348c48f5abdd73b" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.537676 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-l67m7" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.545564 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-e48f-account-create-update-mkwjg" event={"ID":"107ee781-3758-4b91-81d2-f934501de19e","Type":"ContainerDied","Data":"4d3a7f67e750a445f5351e295841aaea09395efa81955ce5a3a44ff63d577aea"} Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.545609 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d3a7f67e750a445f5351e295841aaea09395efa81955ce5a3a44ff63d577aea" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.545689 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-e48f-account-create-update-mkwjg" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.607412 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ee05-account-create-update-dgl6z" event={"ID":"f40c42c3-2575-425d-b80e-845f4a0a88b5","Type":"ContainerDied","Data":"9e57f4312751e2f148ff66a27558803a04fc1339237b204a7fbcda8cdd7b5048"} Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.607472 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9e57f4312751e2f148ff66a27558803a04fc1339237b204a7fbcda8cdd7b5048" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.607482 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-ee05-account-create-update-dgl6z"
Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.629111 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=40.45673552 podStartE2EDuration="1m24.629082261s" podCreationTimestamp="2026-01-30 12:16:20 +0000 UTC" firstStartedPulling="2026-01-30 12:16:23.426037421 +0000 UTC m=+1219.203859075" lastFinishedPulling="2026-01-30 12:17:07.598384162 +0000 UTC m=+1263.376205816" observedRunningTime="2026-01-30 12:17:44.615868525 +0000 UTC m=+1300.393690179" watchObservedRunningTime="2026-01-30 12:17:44.629082261 +0000 UTC m=+1300.406903915"
Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.745470 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt"
Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.905201 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-ovsdbserver-sb\") pod \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") "
Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.905826 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-ovsdbserver-nb\") pod \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") "
Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.905885 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-dns-svc\") pod \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") "
Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.905914 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-config\") pod \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") "
Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.906170 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4lgq5\" (UniqueName: \"kubernetes.io/projected/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-kube-api-access-4lgq5\") pod \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\" (UID: \"5fb4520e-7665-4d6b-8238-e3e3cf2a1306\") "
Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.922874 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-kube-api-access-4lgq5" (OuterVolumeSpecName: "kube-api-access-4lgq5") pod "5fb4520e-7665-4d6b-8238-e3e3cf2a1306" (UID: "5fb4520e-7665-4d6b-8238-e3e3cf2a1306"). InnerVolumeSpecName "kube-api-access-4lgq5". PluginName "kubernetes.io/projected", VolumeGidValue ""
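The pod_startup_latency_tracker record above encodes two durations: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that same span with the image-pull window (lastFinishedPulling minus firstStartedPulling) subtracted. Re-running the arithmetic with the rabbitmq-cell1-server-0 timestamps copied from the log reproduces both figures exactly; this sketch only verifies the relation, it is not the tracker's code.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	parse := func(s string) time.Time {
		t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
		if err != nil {
			panic(err)
		}
		return t
	}
	// Timestamps taken verbatim from the record above.
	created := parse("2026-01-30 12:16:20 +0000 UTC")
	firstPull := parse("2026-01-30 12:16:23.426037421 +0000 UTC")
	lastPull := parse("2026-01-30 12:17:07.598384162 +0000 UTC")
	observed := parse("2026-01-30 12:17:44.629082261 +0000 UTC") // watchObservedRunningTime

	e2e := observed.Sub(created)         // podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // podStartSLOduration: pull time excluded

	fmt.Println(e2e) // 1m24.629082261s
	fmt.Println(slo) // 40.45673552s
}
```

Excluding the pull window is deliberate: the SLO figure measures the kubelet's own startup work, not registry bandwidth.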
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:17:44 crc kubenswrapper[4703]: I0130 12:17:44.974342 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-q9bcn"] Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.009264 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4lgq5\" (UniqueName: \"kubernetes.io/projected/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-kube-api-access-4lgq5\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.082593 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5fb4520e-7665-4d6b-8238-e3e3cf2a1306" (UID: "5fb4520e-7665-4d6b-8238-e3e3cf2a1306"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.111038 4703 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.205414 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-config" (OuterVolumeSpecName: "config") pod "5fb4520e-7665-4d6b-8238-e3e3cf2a1306" (UID: "5fb4520e-7665-4d6b-8238-e3e3cf2a1306"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.208631 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5fb4520e-7665-4d6b-8238-e3e3cf2a1306" (UID: "5fb4520e-7665-4d6b-8238-e3e3cf2a1306"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.210171 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5fb4520e-7665-4d6b-8238-e3e3cf2a1306" (UID: "5fb4520e-7665-4d6b-8238-e3e3cf2a1306"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.212899 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0" Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.213169 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.213193 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.213205 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5fb4520e-7665-4d6b-8238-e3e3cf2a1306-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:45 crc kubenswrapper[4703]: E0130 12:17:45.213348 4703 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 30 12:17:45 crc kubenswrapper[4703]: E0130 12:17:45.213363 4703 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 30 12:17:45 crc kubenswrapper[4703]: E0130 12:17:45.213433 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift podName:6aed7811-b088-403c-bbef-7844c17d52ff nodeName:}" failed. No retries permitted until 2026-01-30 12:18:01.213413315 +0000 UTC m=+1316.991234969 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift") pod "swift-storage-0" (UID: "6aed7811-b088-403c-bbef-7844c17d52ff") : configmap "swift-ring-files" not found
Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.624675 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerStarted","Data":"ae09ea0f762d55711d4ebe52875d9283498e826f1ea02651fb958e545587bc81"}
Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.627849 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea","Type":"ContainerStarted","Data":"617323407efed4f3cf6347b52ee3c150a8e944b111338ae55508f6db39fe9bbc"}
Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.631836 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-v4pc7" event={"ID":"2688299b-7d5f-4fad-9fd9-78de6b83b333","Type":"ContainerStarted","Data":"bd8d3bdfa8d3ca3b04c0e1d737a0b07ce00edef4f668b235e10c9d8edef6babd"}
Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.636875 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-q9bcn" event={"ID":"af9e761d-5f56-419b-8d45-0cadf63e675a","Type":"ContainerStarted","Data":"514a2d6e7b37b449caf2e930b480e70df668ebd7e2ab6100b3ce271f8430bd76"}
Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.656816 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8e865e8b-7723-4aed-b51c-ce7a8da59d13","Type":"ContainerStarted","Data":"ae187ad045ba46adca2a50abcb81635324be57add273df5f7f1986b348e18cea"}
Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.665231 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt"
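The etc-swift failure above is a dependency problem rather than an I/O error: the projected volume includes the swift-ring-files ConfigMap, which will not exist until the swift-ring-rebalance job publishes it, so the mount is requeued with backoff ("No retries permitted until ... durationBeforeRetry 16s"). A sketch of a capped-doubling retry policy of that kind follows; the 500 ms base, factor of 2 and 2 m cap are assumptions for illustration, chosen because the observed 16 s delay is consistent with a handful of earlier failures under such a policy.

```go
package main

import (
	"fmt"
	"time"
)

// backoff returns the delay before the next retry after `failures`
// consecutive failures, doubling from `initial` up to `max`.
func backoff(initial, max time.Duration, failures int) time.Duration {
	d := initial
	for i := 1; i < failures; i++ {
		d *= 2
		if d > max {
			return max
		}
	}
	return d
}

func main() {
	for n := 1; n <= 7; n++ {
		fmt.Printf("failure %d -> retry in %v\n", n, backoff(500*time.Millisecond, 2*time.Minute, n))
	}
	// failure 6 -> retry in 16s, matching durationBeforeRetry in the log.
}
```

Once the ConfigMap appears, the next retry succeeds with no operator action needed, which is why this kind of error is logged but the pod is not marked failed.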
Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.668426 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-x8lvt" event={"ID":"5fb4520e-7665-4d6b-8238-e3e3cf2a1306","Type":"ContainerDied","Data":"9eedb1b7ddf1164dede5de5999cecbdd11be6199b5f5ad6d17bb415fa60ad974"}
Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.668549 4703 scope.go:117] "RemoveContainer" containerID="26b5b238812b8ade1655ca3ca8903878ea93de755f5814f3bb09bdd719cae71a"
Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.675992 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-v4pc7" podStartSLOduration=6.233538183 podStartE2EDuration="16.675969351s" podCreationTimestamp="2026-01-30 12:17:29 +0000 UTC" firstStartedPulling="2026-01-30 12:17:33.724971789 +0000 UTC m=+1289.502793443" lastFinishedPulling="2026-01-30 12:17:44.167402957 +0000 UTC m=+1299.945224611" observedRunningTime="2026-01-30 12:17:45.671173653 +0000 UTC m=+1301.448995317" watchObservedRunningTime="2026-01-30 12:17:45.675969351 +0000 UTC m=+1301.453791005"
Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.750561 4703 scope.go:117] "RemoveContainer" containerID="380f775541011a72cfca0088e38a836dc71885f2bcebcc88d005a9f77516946d"
Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.764589 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=4.7136769560000005 podStartE2EDuration="1m18.764546926s" podCreationTimestamp="2026-01-30 12:16:27 +0000 UTC" firstStartedPulling="2026-01-30 12:16:30.020792961 +0000 UTC m=+1225.798614615" lastFinishedPulling="2026-01-30 12:17:44.071662941 +0000 UTC m=+1299.849484585" observedRunningTime="2026-01-30 12:17:45.719516914 +0000 UTC m=+1301.497338588" watchObservedRunningTime="2026-01-30 12:17:45.764546926 +0000 UTC m=+1301.542368590"
Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.814320 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-tg9tq"
Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.816911 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-x8lvt"]
Jan 30 12:17:45 crc kubenswrapper[4703]: I0130 12:17:45.839227 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-x8lvt"]
Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.059978 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-rjbtf-config-tx2jm"]
Jan 30 12:17:46 crc kubenswrapper[4703]: E0130 12:17:46.060907 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="107ee781-3758-4b91-81d2-f934501de19e" containerName="mariadb-account-create-update"
Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.060997 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="107ee781-3758-4b91-81d2-f934501de19e" containerName="mariadb-account-create-update"
Jan 30 12:17:46 crc kubenswrapper[4703]: E0130 12:17:46.061074 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f40c42c3-2575-425d-b80e-845f4a0a88b5" containerName="mariadb-account-create-update"
Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.061307 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f40c42c3-2575-425d-b80e-845f4a0a88b5" containerName="mariadb-account-create-update"
Jan 30 12:17:46 crc 
kubenswrapper[4703]: E0130 12:17:46.061408 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fb4520e-7665-4d6b-8238-e3e3cf2a1306" containerName="init" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.061478 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fb4520e-7665-4d6b-8238-e3e3cf2a1306" containerName="init" Jan 30 12:17:46 crc kubenswrapper[4703]: E0130 12:17:46.061553 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5fb4520e-7665-4d6b-8238-e3e3cf2a1306" containerName="dnsmasq-dns" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.061622 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="5fb4520e-7665-4d6b-8238-e3e3cf2a1306" containerName="dnsmasq-dns" Jan 30 12:17:46 crc kubenswrapper[4703]: E0130 12:17:46.061698 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0" containerName="mariadb-database-create" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.061762 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0" containerName="mariadb-database-create" Jan 30 12:17:46 crc kubenswrapper[4703]: E0130 12:17:46.061828 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47c89c69-b5ba-480b-bd12-1aeaaf6cbc01" containerName="mariadb-database-create" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.061887 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="47c89c69-b5ba-480b-bd12-1aeaaf6cbc01" containerName="mariadb-database-create" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.062214 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="47c89c69-b5ba-480b-bd12-1aeaaf6cbc01" containerName="mariadb-database-create" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.062306 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="107ee781-3758-4b91-81d2-f934501de19e" containerName="mariadb-account-create-update" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.062372 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="f40c42c3-2575-425d-b80e-845f4a0a88b5" containerName="mariadb-account-create-update" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.062448 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="5fb4520e-7665-4d6b-8238-e3e3cf2a1306" containerName="dnsmasq-dns" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.062567 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0" containerName="mariadb-database-create" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.063486 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.075071 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.081641 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-rjbtf-config-tx2jm"] Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.139225 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksknp\" (UniqueName: \"kubernetes.io/projected/42dcc005-28fe-42d6-9d76-70c950797d16-kube-api-access-ksknp\") pod \"ovn-controller-rjbtf-config-tx2jm\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.139841 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42dcc005-28fe-42d6-9d76-70c950797d16-scripts\") pod \"ovn-controller-rjbtf-config-tx2jm\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.139919 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42dcc005-28fe-42d6-9d76-70c950797d16-var-run\") pod \"ovn-controller-rjbtf-config-tx2jm\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.140088 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42dcc005-28fe-42d6-9d76-70c950797d16-var-log-ovn\") pod \"ovn-controller-rjbtf-config-tx2jm\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.140112 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42dcc005-28fe-42d6-9d76-70c950797d16-additional-scripts\") pod \"ovn-controller-rjbtf-config-tx2jm\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.140157 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42dcc005-28fe-42d6-9d76-70c950797d16-var-run-ovn\") pod \"ovn-controller-rjbtf-config-tx2jm\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.242730 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42dcc005-28fe-42d6-9d76-70c950797d16-var-log-ovn\") pod \"ovn-controller-rjbtf-config-tx2jm\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.242905 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: 
\"kubernetes.io/configmap/42dcc005-28fe-42d6-9d76-70c950797d16-additional-scripts\") pod \"ovn-controller-rjbtf-config-tx2jm\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.243257 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42dcc005-28fe-42d6-9d76-70c950797d16-var-log-ovn\") pod \"ovn-controller-rjbtf-config-tx2jm\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.243303 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42dcc005-28fe-42d6-9d76-70c950797d16-var-run-ovn\") pod \"ovn-controller-rjbtf-config-tx2jm\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.243915 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42dcc005-28fe-42d6-9d76-70c950797d16-additional-scripts\") pod \"ovn-controller-rjbtf-config-tx2jm\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.243982 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42dcc005-28fe-42d6-9d76-70c950797d16-var-run-ovn\") pod \"ovn-controller-rjbtf-config-tx2jm\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.244160 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksknp\" (UniqueName: \"kubernetes.io/projected/42dcc005-28fe-42d6-9d76-70c950797d16-kube-api-access-ksknp\") pod \"ovn-controller-rjbtf-config-tx2jm\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.245101 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42dcc005-28fe-42d6-9d76-70c950797d16-scripts\") pod \"ovn-controller-rjbtf-config-tx2jm\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.245278 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42dcc005-28fe-42d6-9d76-70c950797d16-var-run\") pod \"ovn-controller-rjbtf-config-tx2jm\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.245448 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42dcc005-28fe-42d6-9d76-70c950797d16-var-run\") pod \"ovn-controller-rjbtf-config-tx2jm\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.247482 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/42dcc005-28fe-42d6-9d76-70c950797d16-scripts\") pod \"ovn-controller-rjbtf-config-tx2jm\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.268945 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ksknp\" (UniqueName: \"kubernetes.io/projected/42dcc005-28fe-42d6-9d76-70c950797d16-kube-api-access-ksknp\") pod \"ovn-controller-rjbtf-config-tx2jm\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.391243 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.683869 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-q9bcn" event={"ID":"af9e761d-5f56-419b-8d45-0cadf63e675a","Type":"ContainerStarted","Data":"70d535f221182fdf74ffd4bf95b25fa5d6ab150e020d06a40ae98e25bac59a9f"} Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.690478 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea","Type":"ContainerStarted","Data":"c0dbe86de74ab2c3b9b59f3330aba781aa9706f2762f61045303c7e012014a76"} Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.720094 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-q9bcn" podStartSLOduration=3.720053017 podStartE2EDuration="3.720053017s" podCreationTimestamp="2026-01-30 12:17:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:17:46.705232629 +0000 UTC m=+1302.483054283" watchObservedRunningTime="2026-01-30 12:17:46.720053017 +0000 UTC m=+1302.497874671" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.721689 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 30 12:17:46 crc kubenswrapper[4703]: I0130 12:17:46.740328 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=10.899539004 podStartE2EDuration="20.740297843s" podCreationTimestamp="2026-01-30 12:17:26 +0000 UTC" firstStartedPulling="2026-01-30 12:17:34.109416383 +0000 UTC m=+1289.887238047" lastFinishedPulling="2026-01-30 12:17:43.950175232 +0000 UTC m=+1299.727996886" observedRunningTime="2026-01-30 12:17:46.736612403 +0000 UTC m=+1302.514434057" watchObservedRunningTime="2026-01-30 12:17:46.740297843 +0000 UTC m=+1302.518119487" Jan 30 12:17:47 crc kubenswrapper[4703]: I0130 12:17:47.102381 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fb4520e-7665-4d6b-8238-e3e3cf2a1306" path="/var/lib/kubelet/pods/5fb4520e-7665-4d6b-8238-e3e3cf2a1306/volumes" Jan 30 12:17:48 crc kubenswrapper[4703]: I0130 12:17:48.718868 4703 generic.go:334] "Generic (PLEG): container finished" podID="af9e761d-5f56-419b-8d45-0cadf63e675a" containerID="70d535f221182fdf74ffd4bf95b25fa5d6ab150e020d06a40ae98e25bac59a9f" exitCode=0 Jan 30 12:17:48 crc kubenswrapper[4703]: I0130 12:17:48.718959 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-q9bcn" 
event={"ID":"af9e761d-5f56-419b-8d45-0cadf63e675a","Type":"ContainerDied","Data":"70d535f221182fdf74ffd4bf95b25fa5d6ab150e020d06a40ae98e25bac59a9f"} Jan 30 12:17:48 crc kubenswrapper[4703]: I0130 12:17:48.724475 4703 generic.go:334] "Generic (PLEG): container finished" podID="0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" containerID="481cd73606bf3baac9bc19db3659ddfb18cc1af4e8c3a636d9694a00c056d5b7" exitCode=0 Jan 30 12:17:48 crc kubenswrapper[4703]: I0130 12:17:48.724586 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5","Type":"ContainerDied","Data":"481cd73606bf3baac9bc19db3659ddfb18cc1af4e8c3a636d9694a00c056d5b7"} Jan 30 12:17:48 crc kubenswrapper[4703]: I0130 12:17:48.977790 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 30 12:17:50 crc kubenswrapper[4703]: I0130 12:17:50.709108 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-rjbtf" podUID="fd146d96-e737-48a6-a3e4-d414913da90f" containerName="ovn-controller" probeResult="failure" output=< Jan 30 12:17:50 crc kubenswrapper[4703]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 30 12:17:50 crc kubenswrapper[4703]: > Jan 30 12:17:55 crc kubenswrapper[4703]: I0130 12:17:55.654885 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-rjbtf" podUID="fd146d96-e737-48a6-a3e4-d414913da90f" containerName="ovn-controller" probeResult="failure" output=< Jan 30 12:17:55 crc kubenswrapper[4703]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 30 12:17:55 crc kubenswrapper[4703]: > Jan 30 12:17:55 crc kubenswrapper[4703]: I0130 12:17:55.943487 4703 generic.go:334] "Generic (PLEG): container finished" podID="2688299b-7d5f-4fad-9fd9-78de6b83b333" containerID="bd8d3bdfa8d3ca3b04c0e1d737a0b07ce00edef4f668b235e10c9d8edef6babd" exitCode=0 Jan 30 12:17:55 crc kubenswrapper[4703]: I0130 12:17:55.943560 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-v4pc7" event={"ID":"2688299b-7d5f-4fad-9fd9-78de6b83b333","Type":"ContainerDied","Data":"bd8d3bdfa8d3ca3b04c0e1d737a0b07ce00edef4f668b235e10c9d8edef6babd"} Jan 30 12:17:56 crc kubenswrapper[4703]: I0130 12:17:56.812723 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.322445 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-v4pc7" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.346192 4703 util.go:48] "No ready sandbox for pod can be found. 
Jan 30 12:17:58 crc kubenswrapper[4703]: E0130 12:17:58.492398 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified"
Jan 30 12:17:58 crc kubenswrapper[4703]: E0130 12:17:58.493203 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-p6tfg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-hrpzm_openstack(3da2d370-06f5-4fcc-b58e-2676657e6e85): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 30 12:17:58 crc kubenswrapper[4703]: E0130 12:17:58.494597 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-hrpzm" podUID="3da2d370-06f5-4fcc-b58e-2676657e6e85"
Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.510575 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2688299b-7d5f-4fad-9fd9-78de6b83b333-dispersionconf\") pod \"2688299b-7d5f-4fad-9fd9-78de6b83b333\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") "
Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.510632 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zzwcv\" (UniqueName: \"kubernetes.io/projected/2688299b-7d5f-4fad-9fd9-78de6b83b333-kube-api-access-zzwcv\") pod \"2688299b-7d5f-4fad-9fd9-78de6b83b333\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") "
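The glance-db-sync failure above shows how image pulls surface through the CRI: the runtime returns a gRPC status (code = Canceled here, from a canceled copy), the kubelet wraps it as ErrImagePull, and the pod worker logs "Error syncing pod, skipping" so the pull is retried on a later sync. A sketch of classifying such an error with the standard gRPC status package follows; the error value is fabricated to mirror the log, not obtained from a real CRI call.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Stand-in for the error a CRI PullImage RPC would return.
	pullErr := status.Error(codes.Canceled, "copying config: context canceled")

	if st, ok := status.FromError(pullErr); ok {
		switch st.Code() {
		case codes.Canceled:
			// A canceled pull is transient; the kubelet skips this sync and
			// tries again later rather than marking the pod failed.
			fmt.Println("pull canceled:", st.Message())
		default:
			fmt.Println("pull failed:", st.Code(), st.Message())
		}
	}
}
```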
\"kubernetes.io/projected/2688299b-7d5f-4fad-9fd9-78de6b83b333-kube-api-access-zzwcv\") pod \"2688299b-7d5f-4fad-9fd9-78de6b83b333\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.510736 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2688299b-7d5f-4fad-9fd9-78de6b83b333-swiftconf\") pod \"2688299b-7d5f-4fad-9fd9-78de6b83b333\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.510817 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2688299b-7d5f-4fad-9fd9-78de6b83b333-etc-swift\") pod \"2688299b-7d5f-4fad-9fd9-78de6b83b333\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.510896 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2688299b-7d5f-4fad-9fd9-78de6b83b333-scripts\") pod \"2688299b-7d5f-4fad-9fd9-78de6b83b333\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.510929 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2688299b-7d5f-4fad-9fd9-78de6b83b333-ring-data-devices\") pod \"2688299b-7d5f-4fad-9fd9-78de6b83b333\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.510967 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af9e761d-5f56-419b-8d45-0cadf63e675a-operator-scripts\") pod \"af9e761d-5f56-419b-8d45-0cadf63e675a\" (UID: \"af9e761d-5f56-419b-8d45-0cadf63e675a\") " Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.510990 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2688299b-7d5f-4fad-9fd9-78de6b83b333-combined-ca-bundle\") pod \"2688299b-7d5f-4fad-9fd9-78de6b83b333\" (UID: \"2688299b-7d5f-4fad-9fd9-78de6b83b333\") " Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.511074 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvhnt\" (UniqueName: \"kubernetes.io/projected/af9e761d-5f56-419b-8d45-0cadf63e675a-kube-api-access-bvhnt\") pod \"af9e761d-5f56-419b-8d45-0cadf63e675a\" (UID: \"af9e761d-5f56-419b-8d45-0cadf63e675a\") " Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.515753 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2688299b-7d5f-4fad-9fd9-78de6b83b333-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "2688299b-7d5f-4fad-9fd9-78de6b83b333" (UID: "2688299b-7d5f-4fad-9fd9-78de6b83b333"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.517183 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2688299b-7d5f-4fad-9fd9-78de6b83b333-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "2688299b-7d5f-4fad-9fd9-78de6b83b333" (UID: "2688299b-7d5f-4fad-9fd9-78de6b83b333"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.519181 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/af9e761d-5f56-419b-8d45-0cadf63e675a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "af9e761d-5f56-419b-8d45-0cadf63e675a" (UID: "af9e761d-5f56-419b-8d45-0cadf63e675a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.526594 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/af9e761d-5f56-419b-8d45-0cadf63e675a-kube-api-access-bvhnt" (OuterVolumeSpecName: "kube-api-access-bvhnt") pod "af9e761d-5f56-419b-8d45-0cadf63e675a" (UID: "af9e761d-5f56-419b-8d45-0cadf63e675a"). InnerVolumeSpecName "kube-api-access-bvhnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.558838 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2688299b-7d5f-4fad-9fd9-78de6b83b333-kube-api-access-zzwcv" (OuterVolumeSpecName: "kube-api-access-zzwcv") pod "2688299b-7d5f-4fad-9fd9-78de6b83b333" (UID: "2688299b-7d5f-4fad-9fd9-78de6b83b333"). InnerVolumeSpecName "kube-api-access-zzwcv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.561158 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2688299b-7d5f-4fad-9fd9-78de6b83b333-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "2688299b-7d5f-4fad-9fd9-78de6b83b333" (UID: "2688299b-7d5f-4fad-9fd9-78de6b83b333"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.574751 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2688299b-7d5f-4fad-9fd9-78de6b83b333-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "2688299b-7d5f-4fad-9fd9-78de6b83b333" (UID: "2688299b-7d5f-4fad-9fd9-78de6b83b333"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.578451 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2688299b-7d5f-4fad-9fd9-78de6b83b333-scripts" (OuterVolumeSpecName: "scripts") pod "2688299b-7d5f-4fad-9fd9-78de6b83b333" (UID: "2688299b-7d5f-4fad-9fd9-78de6b83b333"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.585998 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2688299b-7d5f-4fad-9fd9-78de6b83b333-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2688299b-7d5f-4fad-9fd9-78de6b83b333" (UID: "2688299b-7d5f-4fad-9fd9-78de6b83b333"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.612588 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvhnt\" (UniqueName: \"kubernetes.io/projected/af9e761d-5f56-419b-8d45-0cadf63e675a-kube-api-access-bvhnt\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.612622 4703 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/2688299b-7d5f-4fad-9fd9-78de6b83b333-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.612634 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zzwcv\" (UniqueName: \"kubernetes.io/projected/2688299b-7d5f-4fad-9fd9-78de6b83b333-kube-api-access-zzwcv\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.612669 4703 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/2688299b-7d5f-4fad-9fd9-78de6b83b333-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.612680 4703 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/2688299b-7d5f-4fad-9fd9-78de6b83b333-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.612688 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2688299b-7d5f-4fad-9fd9-78de6b83b333-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.612696 4703 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/2688299b-7d5f-4fad-9fd9-78de6b83b333-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.612706 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/af9e761d-5f56-419b-8d45-0cadf63e675a-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.612714 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2688299b-7d5f-4fad-9fd9-78de6b83b333-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.874057 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-rjbtf-config-tx2jm"] Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.977378 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.984401 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.990016 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5","Type":"ContainerStarted","Data":"eef3e6e64c855e27b5c3120003637e4d0d0bb8bb8fb57989469aeb4a3bea0a85"} Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.990876 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.996836 4703 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-v4pc7" event={"ID":"2688299b-7d5f-4fad-9fd9-78de6b83b333","Type":"ContainerDied","Data":"4179c3d89116126dce7955df011c342d1c90271782254510d50f740f90bd1eb8"} Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.997041 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4179c3d89116126dce7955df011c342d1c90271782254510d50f740f90bd1eb8" Jan 30 12:17:58 crc kubenswrapper[4703]: I0130 12:17:58.997276 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-v4pc7" Jan 30 12:17:59 crc kubenswrapper[4703]: I0130 12:17:59.004549 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-rjbtf-config-tx2jm" event={"ID":"42dcc005-28fe-42d6-9d76-70c950797d16","Type":"ContainerStarted","Data":"35b5997307ee219c4a4c6fb84ccda96e47fde18ca83c0d28259f83ee6864899f"} Jan 30 12:17:59 crc kubenswrapper[4703]: I0130 12:17:59.017992 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-q9bcn" Jan 30 12:17:59 crc kubenswrapper[4703]: E0130 12:17:59.027165 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-hrpzm" podUID="3da2d370-06f5-4fcc-b58e-2676657e6e85" Jan 30 12:17:59 crc kubenswrapper[4703]: I0130 12:17:59.028443 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-q9bcn" event={"ID":"af9e761d-5f56-419b-8d45-0cadf63e675a","Type":"ContainerDied","Data":"514a2d6e7b37b449caf2e930b480e70df668ebd7e2ab6100b3ce271f8430bd76"} Jan 30 12:17:59 crc kubenswrapper[4703]: I0130 12:17:59.028514 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="514a2d6e7b37b449caf2e930b480e70df668ebd7e2ab6100b3ce271f8430bd76" Jan 30 12:17:59 crc kubenswrapper[4703]: I0130 12:17:59.066877 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=-9223371937.78793 podStartE2EDuration="1m39.066846088s" podCreationTimestamp="2026-01-30 12:16:20 +0000 UTC" firstStartedPulling="2026-01-30 12:16:23.355693917 +0000 UTC m=+1219.133515571" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:17:59.057047914 +0000 UTC m=+1314.834869598" watchObservedRunningTime="2026-01-30 12:17:59.066846088 +0000 UTC m=+1314.844667742" Jan 30 12:17:59 crc kubenswrapper[4703]: E0130 12:17:59.187178 4703 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf9e761d_5f56_419b_8d45_0cadf63e675a.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf9e761d_5f56_419b_8d45_0cadf63e675a.slice/crio-514a2d6e7b37b449caf2e930b480e70df668ebd7e2ab6100b3ce271f8430bd76\": RecentStats: unable to find data in memory cache]" Jan 30 12:18:00 crc kubenswrapper[4703]: I0130 12:18:00.028187 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-rjbtf-config-tx2jm" 
event={"ID":"42dcc005-28fe-42d6-9d76-70c950797d16","Type":"ContainerStarted","Data":"0bd65d40ff36dcdf736ec5c43b3d91c6169e7a13217349bf119e9e118c38285c"} Jan 30 12:18:00 crc kubenswrapper[4703]: I0130 12:18:00.151683 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:00 crc kubenswrapper[4703]: I0130 12:18:00.260477 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-rjbtf-config-tx2jm" podStartSLOduration=14.260444967 podStartE2EDuration="14.260444967s" podCreationTimestamp="2026-01-30 12:17:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:18:00.245504505 +0000 UTC m=+1316.023326159" watchObservedRunningTime="2026-01-30 12:18:00.260444967 +0000 UTC m=+1316.038266621" Jan 30 12:18:00 crc kubenswrapper[4703]: I0130 12:18:00.645515 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-rjbtf" Jan 30 12:18:01 crc kubenswrapper[4703]: I0130 12:18:01.039801 4703 generic.go:334] "Generic (PLEG): container finished" podID="42dcc005-28fe-42d6-9d76-70c950797d16" containerID="0bd65d40ff36dcdf736ec5c43b3d91c6169e7a13217349bf119e9e118c38285c" exitCode=0 Jan 30 12:18:01 crc kubenswrapper[4703]: I0130 12:18:01.039934 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-rjbtf-config-tx2jm" event={"ID":"42dcc005-28fe-42d6-9d76-70c950797d16","Type":"ContainerDied","Data":"0bd65d40ff36dcdf736ec5c43b3d91c6169e7a13217349bf119e9e118c38285c"} Jan 30 12:18:01 crc kubenswrapper[4703]: I0130 12:18:01.250098 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0" Jan 30 12:18:01 crc kubenswrapper[4703]: I0130 12:18:01.262354 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/6aed7811-b088-403c-bbef-7844c17d52ff-etc-swift\") pod \"swift-storage-0\" (UID: \"6aed7811-b088-403c-bbef-7844c17d52ff\") " pod="openstack/swift-storage-0" Jan 30 12:18:01 crc kubenswrapper[4703]: I0130 12:18:01.508181 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.026811 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.058376 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6aed7811-b088-403c-bbef-7844c17d52ff","Type":"ContainerStarted","Data":"91ee938dec573c13994e35cd2d83999586769740a75930b01828b84606df0268"} Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.217658 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.503713 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.585561 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42dcc005-28fe-42d6-9d76-70c950797d16-additional-scripts\") pod \"42dcc005-28fe-42d6-9d76-70c950797d16\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.585799 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42dcc005-28fe-42d6-9d76-70c950797d16-var-log-ovn\") pod \"42dcc005-28fe-42d6-9d76-70c950797d16\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.585900 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42dcc005-28fe-42d6-9d76-70c950797d16-scripts\") pod \"42dcc005-28fe-42d6-9d76-70c950797d16\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.585932 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42dcc005-28fe-42d6-9d76-70c950797d16-var-run-ovn\") pod \"42dcc005-28fe-42d6-9d76-70c950797d16\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.586001 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42dcc005-28fe-42d6-9d76-70c950797d16-var-run\") pod \"42dcc005-28fe-42d6-9d76-70c950797d16\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.586006 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/42dcc005-28fe-42d6-9d76-70c950797d16-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "42dcc005-28fe-42d6-9d76-70c950797d16" (UID: "42dcc005-28fe-42d6-9d76-70c950797d16"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.586096 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ksknp\" (UniqueName: \"kubernetes.io/projected/42dcc005-28fe-42d6-9d76-70c950797d16-kube-api-access-ksknp\") pod \"42dcc005-28fe-42d6-9d76-70c950797d16\" (UID: \"42dcc005-28fe-42d6-9d76-70c950797d16\") " Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.586111 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/42dcc005-28fe-42d6-9d76-70c950797d16-var-run" (OuterVolumeSpecName: "var-run") pod "42dcc005-28fe-42d6-9d76-70c950797d16" (UID: "42dcc005-28fe-42d6-9d76-70c950797d16"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.586297 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/42dcc005-28fe-42d6-9d76-70c950797d16-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "42dcc005-28fe-42d6-9d76-70c950797d16" (UID: "42dcc005-28fe-42d6-9d76-70c950797d16"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.586666 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42dcc005-28fe-42d6-9d76-70c950797d16-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "42dcc005-28fe-42d6-9d76-70c950797d16" (UID: "42dcc005-28fe-42d6-9d76-70c950797d16"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.587000 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42dcc005-28fe-42d6-9d76-70c950797d16-scripts" (OuterVolumeSpecName: "scripts") pod "42dcc005-28fe-42d6-9d76-70c950797d16" (UID: "42dcc005-28fe-42d6-9d76-70c950797d16"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.587152 4703 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/42dcc005-28fe-42d6-9d76-70c950797d16-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.587170 4703 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/42dcc005-28fe-42d6-9d76-70c950797d16-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.587181 4703 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/42dcc005-28fe-42d6-9d76-70c950797d16-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.587191 4703 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/42dcc005-28fe-42d6-9d76-70c950797d16-var-run\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.621527 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42dcc005-28fe-42d6-9d76-70c950797d16-kube-api-access-ksknp" (OuterVolumeSpecName: "kube-api-access-ksknp") pod "42dcc005-28fe-42d6-9d76-70c950797d16" (UID: "42dcc005-28fe-42d6-9d76-70c950797d16"). InnerVolumeSpecName "kube-api-access-ksknp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.689062 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/42dcc005-28fe-42d6-9d76-70c950797d16-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:02 crc kubenswrapper[4703]: I0130 12:18:02.689150 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ksknp\" (UniqueName: \"kubernetes.io/projected/42dcc005-28fe-42d6-9d76-70c950797d16-kube-api-access-ksknp\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:03 crc kubenswrapper[4703]: I0130 12:18:03.073406 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-rjbtf-config-tx2jm" event={"ID":"42dcc005-28fe-42d6-9d76-70c950797d16","Type":"ContainerDied","Data":"35b5997307ee219c4a4c6fb84ccda96e47fde18ca83c0d28259f83ee6864899f"} Jan 30 12:18:03 crc kubenswrapper[4703]: I0130 12:18:03.074042 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="35b5997307ee219c4a4c6fb84ccda96e47fde18ca83c0d28259f83ee6864899f" Jan 30 12:18:03 crc kubenswrapper[4703]: I0130 12:18:03.073513 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-rjbtf-config-tx2jm" Jan 30 12:18:03 crc kubenswrapper[4703]: I0130 12:18:03.696267 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-rjbtf-config-tx2jm"] Jan 30 12:18:03 crc kubenswrapper[4703]: I0130 12:18:03.705603 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-rjbtf-config-tx2jm"] Jan 30 12:18:03 crc kubenswrapper[4703]: I0130 12:18:03.734914 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 30 12:18:03 crc kubenswrapper[4703]: I0130 12:18:03.735278 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerName="prometheus" containerID="cri-o://91438f56e553120198981dddc1f661e2c9ae1348a4ca7f5c03c2fa8964e87fc2" gracePeriod=600 Jan 30 12:18:03 crc kubenswrapper[4703]: I0130 12:18:03.735428 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerName="thanos-sidecar" containerID="cri-o://ae187ad045ba46adca2a50abcb81635324be57add273df5f7f1986b348e18cea" gracePeriod=600 Jan 30 12:18:03 crc kubenswrapper[4703]: I0130 12:18:03.735474 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerName="config-reloader" containerID="cri-o://a7b99b52d4717eddd1653ce555492f68bc313f64849651afe1894b2bdbafc0b0" gracePeriod=600 Jan 30 12:18:03 crc kubenswrapper[4703]: I0130 12:18:03.979042 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.0.111:9090/-/ready\": dial tcp 10.217.0.111:9090: connect: connection refused" Jan 30 12:18:04 crc kubenswrapper[4703]: I0130 12:18:04.089931 4703 generic.go:334] "Generic (PLEG): container finished" podID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerID="ae187ad045ba46adca2a50abcb81635324be57add273df5f7f1986b348e18cea" exitCode=0 Jan 30 12:18:04 crc 
Jan 30 12:18:04 crc kubenswrapper[4703]: I0130 12:18:04.089979 4703 generic.go:334] "Generic (PLEG): container finished" podID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerID="91438f56e553120198981dddc1f661e2c9ae1348a4ca7f5c03c2fa8964e87fc2" exitCode=0
Jan 30 12:18:04 crc kubenswrapper[4703]: I0130 12:18:04.090016 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8e865e8b-7723-4aed-b51c-ce7a8da59d13","Type":"ContainerDied","Data":"ae187ad045ba46adca2a50abcb81635324be57add273df5f7f1986b348e18cea"}
Jan 30 12:18:04 crc kubenswrapper[4703]: I0130 12:18:04.090087 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8e865e8b-7723-4aed-b51c-ce7a8da59d13","Type":"ContainerDied","Data":"91438f56e553120198981dddc1f661e2c9ae1348a4ca7f5c03c2fa8964e87fc2"}
Jan 30 12:18:04 crc kubenswrapper[4703]: I0130 12:18:04.095294 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6aed7811-b088-403c-bbef-7844c17d52ff","Type":"ContainerStarted","Data":"3341f0739aa88922d9792eb42d0840dc7addf938be6be7151ac7c56ae35fa06a"}
Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.083045 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.103336 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42dcc005-28fe-42d6-9d76-70c950797d16" path="/var/lib/kubelet/pods/42dcc005-28fe-42d6-9d76-70c950797d16/volumes"
Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.130730 4703 generic.go:334] "Generic (PLEG): container finished" podID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerID="a7b99b52d4717eddd1653ce555492f68bc313f64849651afe1894b2bdbafc0b0" exitCode=0
Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.131323 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8e865e8b-7723-4aed-b51c-ce7a8da59d13","Type":"ContainerDied","Data":"a7b99b52d4717eddd1653ce555492f68bc313f64849651afe1894b2bdbafc0b0"}
Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.131351 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.131377 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"8e865e8b-7723-4aed-b51c-ce7a8da59d13","Type":"ContainerDied","Data":"499823e6026b73c90021591cf3b6c9354fc21c44ed9e857cb318f9b4045b8f62"} Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.131409 4703 scope.go:117] "RemoveContainer" containerID="ae187ad045ba46adca2a50abcb81635324be57add273df5f7f1986b348e18cea" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.143385 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6aed7811-b088-403c-bbef-7844c17d52ff","Type":"ContainerStarted","Data":"0effca31f306f91029d09ebf7969eec7e2b2948c06711685188aa5cbe0cad3ff"} Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.143449 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6aed7811-b088-403c-bbef-7844c17d52ff","Type":"ContainerStarted","Data":"a018ca8a9b601630a8134311cb109378c59db2e6673c8f1958faccd38ca6879f"} Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.156843 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/8e865e8b-7723-4aed-b51c-ce7a8da59d13-config\") pod \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.156907 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8e865e8b-7723-4aed-b51c-ce7a8da59d13-web-config\") pod \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.156947 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-spssg\" (UniqueName: \"kubernetes.io/projected/8e865e8b-7723-4aed-b51c-ce7a8da59d13-kube-api-access-spssg\") pod \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.156973 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8e865e8b-7723-4aed-b51c-ce7a8da59d13-config-out\") pod \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.157007 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/8e865e8b-7723-4aed-b51c-ce7a8da59d13-prometheus-metric-storage-rulefiles-1\") pod \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.157164 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af\") pod \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.157195 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: 
\"kubernetes.io/configmap/8e865e8b-7723-4aed-b51c-ce7a8da59d13-prometheus-metric-storage-rulefiles-2\") pod \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.157241 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/8e865e8b-7723-4aed-b51c-ce7a8da59d13-thanos-prometheus-http-client-file\") pod \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.157295 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8e865e8b-7723-4aed-b51c-ce7a8da59d13-tls-assets\") pod \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.157328 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/8e865e8b-7723-4aed-b51c-ce7a8da59d13-prometheus-metric-storage-rulefiles-0\") pod \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\" (UID: \"8e865e8b-7723-4aed-b51c-ce7a8da59d13\") " Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.158531 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e865e8b-7723-4aed-b51c-ce7a8da59d13-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "8e865e8b-7723-4aed-b51c-ce7a8da59d13" (UID: "8e865e8b-7723-4aed-b51c-ce7a8da59d13"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.158678 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e865e8b-7723-4aed-b51c-ce7a8da59d13-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "8e865e8b-7723-4aed-b51c-ce7a8da59d13" (UID: "8e865e8b-7723-4aed-b51c-ce7a8da59d13"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.162468 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e865e8b-7723-4aed-b51c-ce7a8da59d13-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "8e865e8b-7723-4aed-b51c-ce7a8da59d13" (UID: "8e865e8b-7723-4aed-b51c-ce7a8da59d13"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.174501 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e865e8b-7723-4aed-b51c-ce7a8da59d13-kube-api-access-spssg" (OuterVolumeSpecName: "kube-api-access-spssg") pod "8e865e8b-7723-4aed-b51c-ce7a8da59d13" (UID: "8e865e8b-7723-4aed-b51c-ce7a8da59d13"). InnerVolumeSpecName "kube-api-access-spssg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.174993 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e865e8b-7723-4aed-b51c-ce7a8da59d13-config" (OuterVolumeSpecName: "config") pod "8e865e8b-7723-4aed-b51c-ce7a8da59d13" (UID: "8e865e8b-7723-4aed-b51c-ce7a8da59d13"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.175188 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e865e8b-7723-4aed-b51c-ce7a8da59d13-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "8e865e8b-7723-4aed-b51c-ce7a8da59d13" (UID: "8e865e8b-7723-4aed-b51c-ce7a8da59d13"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.183009 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8e865e8b-7723-4aed-b51c-ce7a8da59d13-config-out" (OuterVolumeSpecName: "config-out") pod "8e865e8b-7723-4aed-b51c-ce7a8da59d13" (UID: "8e865e8b-7723-4aed-b51c-ce7a8da59d13"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.196584 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e865e8b-7723-4aed-b51c-ce7a8da59d13-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "8e865e8b-7723-4aed-b51c-ce7a8da59d13" (UID: "8e865e8b-7723-4aed-b51c-ce7a8da59d13"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.202666 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "8e865e8b-7723-4aed-b51c-ce7a8da59d13" (UID: "8e865e8b-7723-4aed-b51c-ce7a8da59d13"). InnerVolumeSpecName "pvc-f44c6b57-502f-456c-b62d-7562ab4250af". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.223825 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e865e8b-7723-4aed-b51c-ce7a8da59d13-web-config" (OuterVolumeSpecName: "web-config") pod "8e865e8b-7723-4aed-b51c-ce7a8da59d13" (UID: "8e865e8b-7723-4aed-b51c-ce7a8da59d13"). InnerVolumeSpecName "web-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.239428 4703 scope.go:117] "RemoveContainer" containerID="a7b99b52d4717eddd1653ce555492f68bc313f64849651afe1894b2bdbafc0b0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.259704 4703 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/8e865e8b-7723-4aed-b51c-ce7a8da59d13-tls-assets\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.259756 4703 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/8e865e8b-7723-4aed-b51c-ce7a8da59d13-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.259769 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/8e865e8b-7723-4aed-b51c-ce7a8da59d13-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.259780 4703 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/8e865e8b-7723-4aed-b51c-ce7a8da59d13-web-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.259792 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-spssg\" (UniqueName: \"kubernetes.io/projected/8e865e8b-7723-4aed-b51c-ce7a8da59d13-kube-api-access-spssg\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.259801 4703 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/8e865e8b-7723-4aed-b51c-ce7a8da59d13-config-out\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.259812 4703 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/8e865e8b-7723-4aed-b51c-ce7a8da59d13-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.259865 4703 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-f44c6b57-502f-456c-b62d-7562ab4250af\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af\") on node \"crc\" " Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.259878 4703 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/8e865e8b-7723-4aed-b51c-ce7a8da59d13-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.259889 4703 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/8e865e8b-7723-4aed-b51c-ce7a8da59d13-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.308630 4703 scope.go:117] "RemoveContainer" containerID="91438f56e553120198981dddc1f661e2c9ae1348a4ca7f5c03c2fa8964e87fc2" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.315326 4703 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.316106 4703 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-f44c6b57-502f-456c-b62d-7562ab4250af" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af") on node "crc" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.343579 4703 scope.go:117] "RemoveContainer" containerID="5ab22e287fac6accde52e20f9994a46a2ea01bfd85cc3568af2592e83a3bca79" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.362736 4703 reconciler_common.go:293] "Volume detached for volume \"pvc-f44c6b57-502f-456c-b62d-7562ab4250af\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.389403 4703 scope.go:117] "RemoveContainer" containerID="ae187ad045ba46adca2a50abcb81635324be57add273df5f7f1986b348e18cea" Jan 30 12:18:05 crc kubenswrapper[4703]: E0130 12:18:05.393268 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae187ad045ba46adca2a50abcb81635324be57add273df5f7f1986b348e18cea\": container with ID starting with ae187ad045ba46adca2a50abcb81635324be57add273df5f7f1986b348e18cea not found: ID does not exist" containerID="ae187ad045ba46adca2a50abcb81635324be57add273df5f7f1986b348e18cea" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.393316 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae187ad045ba46adca2a50abcb81635324be57add273df5f7f1986b348e18cea"} err="failed to get container status \"ae187ad045ba46adca2a50abcb81635324be57add273df5f7f1986b348e18cea\": rpc error: code = NotFound desc = could not find container \"ae187ad045ba46adca2a50abcb81635324be57add273df5f7f1986b348e18cea\": container with ID starting with ae187ad045ba46adca2a50abcb81635324be57add273df5f7f1986b348e18cea not found: ID does not exist" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.393342 4703 scope.go:117] "RemoveContainer" containerID="a7b99b52d4717eddd1653ce555492f68bc313f64849651afe1894b2bdbafc0b0" Jan 30 12:18:05 crc kubenswrapper[4703]: E0130 12:18:05.393963 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7b99b52d4717eddd1653ce555492f68bc313f64849651afe1894b2bdbafc0b0\": container with ID starting with a7b99b52d4717eddd1653ce555492f68bc313f64849651afe1894b2bdbafc0b0 not found: ID does not exist" containerID="a7b99b52d4717eddd1653ce555492f68bc313f64849651afe1894b2bdbafc0b0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.393987 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7b99b52d4717eddd1653ce555492f68bc313f64849651afe1894b2bdbafc0b0"} err="failed to get container status \"a7b99b52d4717eddd1653ce555492f68bc313f64849651afe1894b2bdbafc0b0\": rpc error: code = NotFound desc = could not find container \"a7b99b52d4717eddd1653ce555492f68bc313f64849651afe1894b2bdbafc0b0\": container with ID starting with a7b99b52d4717eddd1653ce555492f68bc313f64849651afe1894b2bdbafc0b0 not found: ID does not exist" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.394001 4703 scope.go:117] "RemoveContainer" containerID="91438f56e553120198981dddc1f661e2c9ae1348a4ca7f5c03c2fa8964e87fc2" Jan 30 12:18:05 crc kubenswrapper[4703]: E0130 12:18:05.396423 4703 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"91438f56e553120198981dddc1f661e2c9ae1348a4ca7f5c03c2fa8964e87fc2\": container with ID starting with 91438f56e553120198981dddc1f661e2c9ae1348a4ca7f5c03c2fa8964e87fc2 not found: ID does not exist" containerID="91438f56e553120198981dddc1f661e2c9ae1348a4ca7f5c03c2fa8964e87fc2" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.396449 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91438f56e553120198981dddc1f661e2c9ae1348a4ca7f5c03c2fa8964e87fc2"} err="failed to get container status \"91438f56e553120198981dddc1f661e2c9ae1348a4ca7f5c03c2fa8964e87fc2\": rpc error: code = NotFound desc = could not find container \"91438f56e553120198981dddc1f661e2c9ae1348a4ca7f5c03c2fa8964e87fc2\": container with ID starting with 91438f56e553120198981dddc1f661e2c9ae1348a4ca7f5c03c2fa8964e87fc2 not found: ID does not exist" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.396466 4703 scope.go:117] "RemoveContainer" containerID="5ab22e287fac6accde52e20f9994a46a2ea01bfd85cc3568af2592e83a3bca79" Jan 30 12:18:05 crc kubenswrapper[4703]: E0130 12:18:05.397080 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ab22e287fac6accde52e20f9994a46a2ea01bfd85cc3568af2592e83a3bca79\": container with ID starting with 5ab22e287fac6accde52e20f9994a46a2ea01bfd85cc3568af2592e83a3bca79 not found: ID does not exist" containerID="5ab22e287fac6accde52e20f9994a46a2ea01bfd85cc3568af2592e83a3bca79" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.397101 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ab22e287fac6accde52e20f9994a46a2ea01bfd85cc3568af2592e83a3bca79"} err="failed to get container status \"5ab22e287fac6accde52e20f9994a46a2ea01bfd85cc3568af2592e83a3bca79\": rpc error: code = NotFound desc = could not find container \"5ab22e287fac6accde52e20f9994a46a2ea01bfd85cc3568af2592e83a3bca79\": container with ID starting with 5ab22e287fac6accde52e20f9994a46a2ea01bfd85cc3568af2592e83a3bca79 not found: ID does not exist" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.497372 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.507189 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.527109 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 30 12:18:05 crc kubenswrapper[4703]: E0130 12:18:05.527718 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerName="prometheus" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.527751 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerName="prometheus" Jan 30 12:18:05 crc kubenswrapper[4703]: E0130 12:18:05.527769 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2688299b-7d5f-4fad-9fd9-78de6b83b333" containerName="swift-ring-rebalance" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.527779 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2688299b-7d5f-4fad-9fd9-78de6b83b333" containerName="swift-ring-rebalance" Jan 30 12:18:05 crc kubenswrapper[4703]: E0130 12:18:05.527799 4703 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerName="thanos-sidecar" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.527810 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerName="thanos-sidecar" Jan 30 12:18:05 crc kubenswrapper[4703]: E0130 12:18:05.527819 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerName="config-reloader" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.527827 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerName="config-reloader" Jan 30 12:18:05 crc kubenswrapper[4703]: E0130 12:18:05.527845 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="af9e761d-5f56-419b-8d45-0cadf63e675a" containerName="mariadb-account-create-update" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.527855 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="af9e761d-5f56-419b-8d45-0cadf63e675a" containerName="mariadb-account-create-update" Jan 30 12:18:05 crc kubenswrapper[4703]: E0130 12:18:05.527880 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42dcc005-28fe-42d6-9d76-70c950797d16" containerName="ovn-config" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.527890 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="42dcc005-28fe-42d6-9d76-70c950797d16" containerName="ovn-config" Jan 30 12:18:05 crc kubenswrapper[4703]: E0130 12:18:05.527905 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerName="init-config-reloader" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.527912 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerName="init-config-reloader" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.528159 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerName="thanos-sidecar" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.528186 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2688299b-7d5f-4fad-9fd9-78de6b83b333" containerName="swift-ring-rebalance" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.528203 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerName="config-reloader" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.528218 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="42dcc005-28fe-42d6-9d76-70c950797d16" containerName="ovn-config" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.528230 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="af9e761d-5f56-419b-8d45-0cadf63e675a" containerName="mariadb-account-create-update" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.528242 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" containerName="prometheus" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.530344 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.537104 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.538090 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.540072 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.540284 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.540483 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-f82pr" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.541915 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.544778 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.544951 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.557049 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.568315 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.669226 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.669874 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f44c6b57-502f-456c-b62d-7562ab4250af\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.670332 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92fh8\" (UniqueName: \"kubernetes.io/projected/7a919f37-730e-42a4-848f-ae5b2096b2d2-kube-api-access-92fh8\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.670422 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-config\") pod \"prometheus-metric-storage-0\" 
(UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.670481 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/7a919f37-730e-42a4-848f-ae5b2096b2d2-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.670526 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.670551 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.670589 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.670629 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7a919f37-730e-42a4-848f-ae5b2096b2d2-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.670696 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.670728 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/7a919f37-730e-42a4-848f-ae5b2096b2d2-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.671019 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7a919f37-730e-42a4-848f-ae5b2096b2d2-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " 
pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.671320 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/7a919f37-730e-42a4-848f-ae5b2096b2d2-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.773737 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.773847 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f44c6b57-502f-456c-b62d-7562ab4250af\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.773890 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92fh8\" (UniqueName: \"kubernetes.io/projected/7a919f37-730e-42a4-848f-ae5b2096b2d2-kube-api-access-92fh8\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.773918 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-config\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.773960 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/7a919f37-730e-42a4-848f-ae5b2096b2d2-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.773993 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.774029 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.774055 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.774096 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7a919f37-730e-42a4-848f-ae5b2096b2d2-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.774159 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.774181 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/7a919f37-730e-42a4-848f-ae5b2096b2d2-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.774219 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7a919f37-730e-42a4-848f-ae5b2096b2d2-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.774267 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/7a919f37-730e-42a4-848f-ae5b2096b2d2-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.775276 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/7a919f37-730e-42a4-848f-ae5b2096b2d2-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.776418 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/7a919f37-730e-42a4-848f-ae5b2096b2d2-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.776643 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/7a919f37-730e-42a4-848f-ae5b2096b2d2-prometheus-metric-storage-rulefiles-2\") pod 
\"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.778538 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.785957 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.786372 4703 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.786445 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f44c6b57-502f-456c-b62d-7562ab4250af\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e923a3fbd1684cd164e99ff35e919b68d893d89a026a0f736ef548b3af68c494/globalmount\"" pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.786636 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7a919f37-730e-42a4-848f-ae5b2096b2d2-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.787839 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-config\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.789526 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.790082 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.790872 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.800025 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92fh8\" (UniqueName: \"kubernetes.io/projected/7a919f37-730e-42a4-848f-ae5b2096b2d2-kube-api-access-92fh8\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.814325 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7a919f37-730e-42a4-848f-ae5b2096b2d2-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:05 crc kubenswrapper[4703]: I0130 12:18:05.864887 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f44c6b57-502f-456c-b62d-7562ab4250af\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af\") pod \"prometheus-metric-storage-0\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:06 crc kubenswrapper[4703]: I0130 12:18:06.151422 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:06 crc kubenswrapper[4703]: I0130 12:18:06.158958 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6aed7811-b088-403c-bbef-7844c17d52ff","Type":"ContainerStarted","Data":"d9c0a2b948b0aa21478bf5a7ff5925f10b5c0dc85604bc5835e70a27b16f8bfe"} Jan 30 12:18:07 crc kubenswrapper[4703]: I0130 12:18:07.107100 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e865e8b-7723-4aed-b51c-ce7a8da59d13" path="/var/lib/kubelet/pods/8e865e8b-7723-4aed-b51c-ce7a8da59d13/volumes" Jan 30 12:18:09 crc kubenswrapper[4703]: I0130 12:18:08.242989 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6aed7811-b088-403c-bbef-7844c17d52ff","Type":"ContainerStarted","Data":"bcebe5d9d39127c7af3e6e1abbf4727de86eb7d5f00890d3a4bf92ce5700841d"} Jan 30 12:18:09 crc kubenswrapper[4703]: I0130 12:18:08.247561 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6aed7811-b088-403c-bbef-7844c17d52ff","Type":"ContainerStarted","Data":"f74e8169d7c3a4ffb34feba2c1ee2c455ea979f21512c034a81917346b93f3ee"} Jan 30 12:18:09 crc kubenswrapper[4703]: I0130 12:18:08.247587 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6aed7811-b088-403c-bbef-7844c17d52ff","Type":"ContainerStarted","Data":"9913e0ae101bf0a3bb7512128846cbec1bb7af5136bdc83a8d582a1d63337fa6"} Jan 30 12:18:09 crc kubenswrapper[4703]: I0130 12:18:09.258361 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6aed7811-b088-403c-bbef-7844c17d52ff","Type":"ContainerStarted","Data":"0d426e4e3512a51a80a77ab8655ba3b89ccdcbb65b9fd2581b879711507ed035"} Jan 30 12:18:09 crc kubenswrapper[4703]: I0130 12:18:09.901649 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 30 12:18:09 crc 
kubenswrapper[4703]: W0130 12:18:09.910674 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7a919f37_730e_42a4_848f_ae5b2096b2d2.slice/crio-cadab602e9c98ba517bc13d059da63d64fc8ad6ba808760adb4cc4185a7d01cf WatchSource:0}: Error finding container cadab602e9c98ba517bc13d059da63d64fc8ad6ba808760adb4cc4185a7d01cf: Status 404 returned error can't find the container with id cadab602e9c98ba517bc13d059da63d64fc8ad6ba808760adb4cc4185a7d01cf Jan 30 12:18:10 crc kubenswrapper[4703]: I0130 12:18:10.274217 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7a919f37-730e-42a4-848f-ae5b2096b2d2","Type":"ContainerStarted","Data":"cadab602e9c98ba517bc13d059da63d64fc8ad6ba808760adb4cc4185a7d01cf"} Jan 30 12:18:11 crc kubenswrapper[4703]: I0130 12:18:11.361273 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6aed7811-b088-403c-bbef-7844c17d52ff","Type":"ContainerStarted","Data":"a1732e4c6a24bf65bfebd126f2c0aded6d65701d659047ded485c51379928297"} Jan 30 12:18:11 crc kubenswrapper[4703]: I0130 12:18:11.361804 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6aed7811-b088-403c-bbef-7844c17d52ff","Type":"ContainerStarted","Data":"ae43db8a2aa2fec9f548a4ae2ab7b93098f74facd0d73e2371c7d556ca6e191c"} Jan 30 12:18:12 crc kubenswrapper[4703]: I0130 12:18:12.302405 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 30 12:18:12 crc kubenswrapper[4703]: I0130 12:18:12.389002 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6aed7811-b088-403c-bbef-7844c17d52ff","Type":"ContainerStarted","Data":"49f567e3b4841694a185cbd0d2d8e1fb591c4f690e7e69d0323f082887321e48"} Jan 30 12:18:12 crc kubenswrapper[4703]: I0130 12:18:12.389083 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6aed7811-b088-403c-bbef-7844c17d52ff","Type":"ContainerStarted","Data":"28f4e22294831917b0be4fa164d8f5de02b69572893ecbf0972cc4fce69e8ead"} Jan 30 12:18:12 crc kubenswrapper[4703]: I0130 12:18:12.389098 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6aed7811-b088-403c-bbef-7844c17d52ff","Type":"ContainerStarted","Data":"0a6766b2be2448af8a9d0b2dccbaa65fe521dca9bfac34d48506745be12803e8"} Jan 30 12:18:12 crc kubenswrapper[4703]: I0130 12:18:12.389110 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6aed7811-b088-403c-bbef-7844c17d52ff","Type":"ContainerStarted","Data":"8f8d48144304dc29d95688067c11640fba48f9953fef74fde6af7316187ac710"} Jan 30 12:18:12 crc kubenswrapper[4703]: I0130 12:18:12.900514 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-db-sync-vhshs"] Jan 30 12:18:12 crc kubenswrapper[4703]: I0130 12:18:12.902708 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-db-sync-vhshs" Jan 30 12:18:12 crc kubenswrapper[4703]: I0130 12:18:12.908577 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-config-data" Jan 30 12:18:12 crc kubenswrapper[4703]: I0130 12:18:12.909741 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-tt2z5" Jan 30 12:18:12 crc kubenswrapper[4703]: I0130 12:18:12.934684 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-b4l7b"] Jan 30 12:18:12 crc kubenswrapper[4703]: I0130 12:18:12.936684 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-b4l7b" Jan 30 12:18:12 crc kubenswrapper[4703]: I0130 12:18:12.951747 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-vhshs"] Jan 30 12:18:12 crc kubenswrapper[4703]: I0130 12:18:12.963289 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-b4l7b"] Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.018327 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/23b1be5c-b917-4633-b207-9dfeb79ebadc-operator-scripts\") pod \"cinder-db-create-b4l7b\" (UID: \"23b1be5c-b917-4633-b207-9dfeb79ebadc\") " pod="openstack/cinder-db-create-b4l7b" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.018518 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd3bf27b-46bc-468e-8735-3a3ed6eda272-config-data\") pod \"watcher-db-sync-vhshs\" (UID: \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\") " pod="openstack/watcher-db-sync-vhshs" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.018560 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd3bf27b-46bc-468e-8735-3a3ed6eda272-combined-ca-bundle\") pod \"watcher-db-sync-vhshs\" (UID: \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\") " pod="openstack/watcher-db-sync-vhshs" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.018641 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5jdv\" (UniqueName: \"kubernetes.io/projected/cd3bf27b-46bc-468e-8735-3a3ed6eda272-kube-api-access-b5jdv\") pod \"watcher-db-sync-vhshs\" (UID: \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\") " pod="openstack/watcher-db-sync-vhshs" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.018671 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhnmk\" (UniqueName: \"kubernetes.io/projected/23b1be5c-b917-4633-b207-9dfeb79ebadc-kube-api-access-vhnmk\") pod \"cinder-db-create-b4l7b\" (UID: \"23b1be5c-b917-4633-b207-9dfeb79ebadc\") " pod="openstack/cinder-db-create-b4l7b" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.018706 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cd3bf27b-46bc-468e-8735-3a3ed6eda272-db-sync-config-data\") pod \"watcher-db-sync-vhshs\" (UID: \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\") " pod="openstack/watcher-db-sync-vhshs" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.073475 4703 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/barbican-db-create-6qgph"] Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.077089 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-6qgph" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.114668 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-6qgph"] Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.125629 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd3bf27b-46bc-468e-8735-3a3ed6eda272-config-data\") pod \"watcher-db-sync-vhshs\" (UID: \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\") " pod="openstack/watcher-db-sync-vhshs" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.125698 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd3bf27b-46bc-468e-8735-3a3ed6eda272-combined-ca-bundle\") pod \"watcher-db-sync-vhshs\" (UID: \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\") " pod="openstack/watcher-db-sync-vhshs" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.125814 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5jdv\" (UniqueName: \"kubernetes.io/projected/cd3bf27b-46bc-468e-8735-3a3ed6eda272-kube-api-access-b5jdv\") pod \"watcher-db-sync-vhshs\" (UID: \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\") " pod="openstack/watcher-db-sync-vhshs" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.125844 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhnmk\" (UniqueName: \"kubernetes.io/projected/23b1be5c-b917-4633-b207-9dfeb79ebadc-kube-api-access-vhnmk\") pod \"cinder-db-create-b4l7b\" (UID: \"23b1be5c-b917-4633-b207-9dfeb79ebadc\") " pod="openstack/cinder-db-create-b4l7b" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.125871 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cd3bf27b-46bc-468e-8735-3a3ed6eda272-db-sync-config-data\") pod \"watcher-db-sync-vhshs\" (UID: \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\") " pod="openstack/watcher-db-sync-vhshs" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.125904 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/23b1be5c-b917-4633-b207-9dfeb79ebadc-operator-scripts\") pod \"cinder-db-create-b4l7b\" (UID: \"23b1be5c-b917-4633-b207-9dfeb79ebadc\") " pod="openstack/cinder-db-create-b4l7b" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.127062 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/23b1be5c-b917-4633-b207-9dfeb79ebadc-operator-scripts\") pod \"cinder-db-create-b4l7b\" (UID: \"23b1be5c-b917-4633-b207-9dfeb79ebadc\") " pod="openstack/cinder-db-create-b4l7b" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.139831 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd3bf27b-46bc-468e-8735-3a3ed6eda272-config-data\") pod \"watcher-db-sync-vhshs\" (UID: \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\") " pod="openstack/watcher-db-sync-vhshs" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.147867 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cd3bf27b-46bc-468e-8735-3a3ed6eda272-db-sync-config-data\") pod \"watcher-db-sync-vhshs\" (UID: \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\") " pod="openstack/watcher-db-sync-vhshs" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.166781 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-c990-account-create-update-hr75h"] Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.171475 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd3bf27b-46bc-468e-8735-3a3ed6eda272-combined-ca-bundle\") pod \"watcher-db-sync-vhshs\" (UID: \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\") " pod="openstack/watcher-db-sync-vhshs" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.172020 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-c990-account-create-update-hr75h" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.181597 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.185793 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5jdv\" (UniqueName: \"kubernetes.io/projected/cd3bf27b-46bc-468e-8735-3a3ed6eda272-kube-api-access-b5jdv\") pod \"watcher-db-sync-vhshs\" (UID: \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\") " pod="openstack/watcher-db-sync-vhshs" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.188993 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhnmk\" (UniqueName: \"kubernetes.io/projected/23b1be5c-b917-4633-b207-9dfeb79ebadc-kube-api-access-vhnmk\") pod \"cinder-db-create-b4l7b\" (UID: \"23b1be5c-b917-4633-b207-9dfeb79ebadc\") " pod="openstack/cinder-db-create-b4l7b" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.224371 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-c990-account-create-update-hr75h"] Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.234165 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5c9rr\" (UniqueName: \"kubernetes.io/projected/fe0648b2-c4f9-47dd-bf18-e00e2958c243-kube-api-access-5c9rr\") pod \"barbican-db-create-6qgph\" (UID: \"fe0648b2-c4f9-47dd-bf18-e00e2958c243\") " pod="openstack/barbican-db-create-6qgph" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.235168 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fe0648b2-c4f9-47dd-bf18-e00e2958c243-operator-scripts\") pod \"barbican-db-create-6qgph\" (UID: \"fe0648b2-c4f9-47dd-bf18-e00e2958c243\") " pod="openstack/barbican-db-create-6qgph" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.317600 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-75krj"] Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.319701 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-75krj" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.324935 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.325549 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.325692 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.325876 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-cz6x7" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.351719 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-b4l7b" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.352334 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fe0648b2-c4f9-47dd-bf18-e00e2958c243-operator-scripts\") pod \"barbican-db-create-6qgph\" (UID: \"fe0648b2-c4f9-47dd-bf18-e00e2958c243\") " pod="openstack/barbican-db-create-6qgph" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.353839 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-vhshs" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.354757 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcbxs\" (UniqueName: \"kubernetes.io/projected/24145d6b-2acd-413a-8305-a295030ebe1f-kube-api-access-vcbxs\") pod \"barbican-c990-account-create-update-hr75h\" (UID: \"24145d6b-2acd-413a-8305-a295030ebe1f\") " pod="openstack/barbican-c990-account-create-update-hr75h" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.355008 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5c9rr\" (UniqueName: \"kubernetes.io/projected/fe0648b2-c4f9-47dd-bf18-e00e2958c243-kube-api-access-5c9rr\") pod \"barbican-db-create-6qgph\" (UID: \"fe0648b2-c4f9-47dd-bf18-e00e2958c243\") " pod="openstack/barbican-db-create-6qgph" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.355265 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24145d6b-2acd-413a-8305-a295030ebe1f-operator-scripts\") pod \"barbican-c990-account-create-update-hr75h\" (UID: \"24145d6b-2acd-413a-8305-a295030ebe1f\") " pod="openstack/barbican-c990-account-create-update-hr75h" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.356202 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fe0648b2-c4f9-47dd-bf18-e00e2958c243-operator-scripts\") pod \"barbican-db-create-6qgph\" (UID: \"fe0648b2-c4f9-47dd-bf18-e00e2958c243\") " pod="openstack/barbican-db-create-6qgph" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.385789 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-75krj"] Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.449516 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-njb8f"] Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.451366 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-njb8f" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.460024 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76-combined-ca-bundle\") pod \"keystone-db-sync-75krj\" (UID: \"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76\") " pod="openstack/keystone-db-sync-75krj" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.460113 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d79ec4e-4ab2-4be5-8682-ad1d8aeea327-operator-scripts\") pod \"neutron-db-create-njb8f\" (UID: \"6d79ec4e-4ab2-4be5-8682-ad1d8aeea327\") " pod="openstack/neutron-db-create-njb8f" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.460220 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24145d6b-2acd-413a-8305-a295030ebe1f-operator-scripts\") pod \"barbican-c990-account-create-update-hr75h\" (UID: \"24145d6b-2acd-413a-8305-a295030ebe1f\") " pod="openstack/barbican-c990-account-create-update-hr75h" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.460295 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vs8b4\" (UniqueName: \"kubernetes.io/projected/6d79ec4e-4ab2-4be5-8682-ad1d8aeea327-kube-api-access-vs8b4\") pod \"neutron-db-create-njb8f\" (UID: \"6d79ec4e-4ab2-4be5-8682-ad1d8aeea327\") " pod="openstack/neutron-db-create-njb8f" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.460338 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76-config-data\") pod \"keystone-db-sync-75krj\" (UID: \"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76\") " pod="openstack/keystone-db-sync-75krj" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.460422 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcbxs\" (UniqueName: \"kubernetes.io/projected/24145d6b-2acd-413a-8305-a295030ebe1f-kube-api-access-vcbxs\") pod \"barbican-c990-account-create-update-hr75h\" (UID: \"24145d6b-2acd-413a-8305-a295030ebe1f\") " pod="openstack/barbican-c990-account-create-update-hr75h" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.460449 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ws8dn\" (UniqueName: \"kubernetes.io/projected/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76-kube-api-access-ws8dn\") pod \"keystone-db-sync-75krj\" (UID: \"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76\") " pod="openstack/keystone-db-sync-75krj" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.461046 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24145d6b-2acd-413a-8305-a295030ebe1f-operator-scripts\") pod \"barbican-c990-account-create-update-hr75h\" (UID: \"24145d6b-2acd-413a-8305-a295030ebe1f\") " pod="openstack/barbican-c990-account-create-update-hr75h" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.518378 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-njb8f"] Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 
12:18:13.546067 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcbxs\" (UniqueName: \"kubernetes.io/projected/24145d6b-2acd-413a-8305-a295030ebe1f-kube-api-access-vcbxs\") pod \"barbican-c990-account-create-update-hr75h\" (UID: \"24145d6b-2acd-413a-8305-a295030ebe1f\") " pod="openstack/barbican-c990-account-create-update-hr75h" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.546557 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-9239-account-create-update-rd2pp"] Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.548501 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5c9rr\" (UniqueName: \"kubernetes.io/projected/fe0648b2-c4f9-47dd-bf18-e00e2958c243-kube-api-access-5c9rr\") pod \"barbican-db-create-6qgph\" (UID: \"fe0648b2-c4f9-47dd-bf18-e00e2958c243\") " pod="openstack/barbican-db-create-6qgph" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.555415 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-9239-account-create-update-rd2pp" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.559629 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.576868 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76-combined-ca-bundle\") pod \"keystone-db-sync-75krj\" (UID: \"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76\") " pod="openstack/keystone-db-sync-75krj" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.577023 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d79ec4e-4ab2-4be5-8682-ad1d8aeea327-operator-scripts\") pod \"neutron-db-create-njb8f\" (UID: \"6d79ec4e-4ab2-4be5-8682-ad1d8aeea327\") " pod="openstack/neutron-db-create-njb8f" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.577147 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vs8b4\" (UniqueName: \"kubernetes.io/projected/6d79ec4e-4ab2-4be5-8682-ad1d8aeea327-kube-api-access-vs8b4\") pod \"neutron-db-create-njb8f\" (UID: \"6d79ec4e-4ab2-4be5-8682-ad1d8aeea327\") " pod="openstack/neutron-db-create-njb8f" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.577207 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76-config-data\") pod \"keystone-db-sync-75krj\" (UID: \"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76\") " pod="openstack/keystone-db-sync-75krj" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.577294 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6k86\" (UniqueName: \"kubernetes.io/projected/5f7deb02-fe54-403e-b67a-45e8d8e62a62-kube-api-access-j6k86\") pod \"cinder-9239-account-create-update-rd2pp\" (UID: \"5f7deb02-fe54-403e-b67a-45e8d8e62a62\") " pod="openstack/cinder-9239-account-create-update-rd2pp" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.577347 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5f7deb02-fe54-403e-b67a-45e8d8e62a62-operator-scripts\") pod 
\"cinder-9239-account-create-update-rd2pp\" (UID: \"5f7deb02-fe54-403e-b67a-45e8d8e62a62\") " pod="openstack/cinder-9239-account-create-update-rd2pp" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.577395 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ws8dn\" (UniqueName: \"kubernetes.io/projected/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76-kube-api-access-ws8dn\") pod \"keystone-db-sync-75krj\" (UID: \"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76\") " pod="openstack/keystone-db-sync-75krj" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.581857 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d79ec4e-4ab2-4be5-8682-ad1d8aeea327-operator-scripts\") pod \"neutron-db-create-njb8f\" (UID: \"6d79ec4e-4ab2-4be5-8682-ad1d8aeea327\") " pod="openstack/neutron-db-create-njb8f" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.584149 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76-combined-ca-bundle\") pod \"keystone-db-sync-75krj\" (UID: \"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76\") " pod="openstack/keystone-db-sync-75krj" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.586911 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76-config-data\") pod \"keystone-db-sync-75krj\" (UID: \"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76\") " pod="openstack/keystone-db-sync-75krj" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.604541 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-c990-account-create-update-hr75h" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.648686 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vs8b4\" (UniqueName: \"kubernetes.io/projected/6d79ec4e-4ab2-4be5-8682-ad1d8aeea327-kube-api-access-vs8b4\") pod \"neutron-db-create-njb8f\" (UID: \"6d79ec4e-4ab2-4be5-8682-ad1d8aeea327\") " pod="openstack/neutron-db-create-njb8f" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.659256 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-9239-account-create-update-rd2pp"] Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.681512 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6k86\" (UniqueName: \"kubernetes.io/projected/5f7deb02-fe54-403e-b67a-45e8d8e62a62-kube-api-access-j6k86\") pod \"cinder-9239-account-create-update-rd2pp\" (UID: \"5f7deb02-fe54-403e-b67a-45e8d8e62a62\") " pod="openstack/cinder-9239-account-create-update-rd2pp" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.681574 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5f7deb02-fe54-403e-b67a-45e8d8e62a62-operator-scripts\") pod \"cinder-9239-account-create-update-rd2pp\" (UID: \"5f7deb02-fe54-403e-b67a-45e8d8e62a62\") " pod="openstack/cinder-9239-account-create-update-rd2pp" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.682804 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5f7deb02-fe54-403e-b67a-45e8d8e62a62-operator-scripts\") pod \"cinder-9239-account-create-update-rd2pp\" (UID: 
\"5f7deb02-fe54-403e-b67a-45e8d8e62a62\") " pod="openstack/cinder-9239-account-create-update-rd2pp" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.694530 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ws8dn\" (UniqueName: \"kubernetes.io/projected/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76-kube-api-access-ws8dn\") pod \"keystone-db-sync-75krj\" (UID: \"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76\") " pod="openstack/keystone-db-sync-75krj" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.702564 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-6qgph" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.726332 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-654f-account-create-update-w7fdk"] Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.727782 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-654f-account-create-update-w7fdk" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.734456 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.741428 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6k86\" (UniqueName: \"kubernetes.io/projected/5f7deb02-fe54-403e-b67a-45e8d8e62a62-kube-api-access-j6k86\") pod \"cinder-9239-account-create-update-rd2pp\" (UID: \"5f7deb02-fe54-403e-b67a-45e8d8e62a62\") " pod="openstack/cinder-9239-account-create-update-rd2pp" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.766076 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-654f-account-create-update-w7fdk"] Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.781757 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-njb8f" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.785860 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4a552c6-d8b7-470e-aaae-01d7d29d9cb5-operator-scripts\") pod \"neutron-654f-account-create-update-w7fdk\" (UID: \"f4a552c6-d8b7-470e-aaae-01d7d29d9cb5\") " pod="openstack/neutron-654f-account-create-update-w7fdk" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.786041 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4czxv\" (UniqueName: \"kubernetes.io/projected/f4a552c6-d8b7-470e-aaae-01d7d29d9cb5-kube-api-access-4czxv\") pod \"neutron-654f-account-create-update-w7fdk\" (UID: \"f4a552c6-d8b7-470e-aaae-01d7d29d9cb5\") " pod="openstack/neutron-654f-account-create-update-w7fdk" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.889544 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4a552c6-d8b7-470e-aaae-01d7d29d9cb5-operator-scripts\") pod \"neutron-654f-account-create-update-w7fdk\" (UID: \"f4a552c6-d8b7-470e-aaae-01d7d29d9cb5\") " pod="openstack/neutron-654f-account-create-update-w7fdk" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.889701 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4czxv\" (UniqueName: \"kubernetes.io/projected/f4a552c6-d8b7-470e-aaae-01d7d29d9cb5-kube-api-access-4czxv\") pod \"neutron-654f-account-create-update-w7fdk\" (UID: \"f4a552c6-d8b7-470e-aaae-01d7d29d9cb5\") " pod="openstack/neutron-654f-account-create-update-w7fdk" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.890968 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4a552c6-d8b7-470e-aaae-01d7d29d9cb5-operator-scripts\") pod \"neutron-654f-account-create-update-w7fdk\" (UID: \"f4a552c6-d8b7-470e-aaae-01d7d29d9cb5\") " pod="openstack/neutron-654f-account-create-update-w7fdk" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.933406 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4czxv\" (UniqueName: \"kubernetes.io/projected/f4a552c6-d8b7-470e-aaae-01d7d29d9cb5-kube-api-access-4czxv\") pod \"neutron-654f-account-create-update-w7fdk\" (UID: \"f4a552c6-d8b7-470e-aaae-01d7d29d9cb5\") " pod="openstack/neutron-654f-account-create-update-w7fdk" Jan 30 12:18:13 crc kubenswrapper[4703]: I0130 12:18:13.943451 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-75krj" Jan 30 12:18:14 crc kubenswrapper[4703]: I0130 12:18:14.011739 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-9239-account-create-update-rd2pp" Jan 30 12:18:14 crc kubenswrapper[4703]: I0130 12:18:14.058979 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-654f-account-create-update-w7fdk" Jan 30 12:18:14 crc kubenswrapper[4703]: I0130 12:18:14.455018 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"6aed7811-b088-403c-bbef-7844c17d52ff","Type":"ContainerStarted","Data":"702fad2cf7fe071202321e1d89aa8da683098cd9ede7d1971be18e548f29ed9e"} Jan 30 12:18:14 crc kubenswrapper[4703]: I0130 12:18:14.466895 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7a919f37-730e-42a4-848f-ae5b2096b2d2","Type":"ContainerStarted","Data":"10e0bcb02fd9e626b5845d848fc0133efa1c70a29ef4dadf9b2cbb9c1160d864"} Jan 30 12:18:14 crc kubenswrapper[4703]: I0130 12:18:14.527825 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=38.194795886 podStartE2EDuration="46.527787031s" podCreationTimestamp="2026-01-30 12:17:28 +0000 UTC" firstStartedPulling="2026-01-30 12:18:02.040717062 +0000 UTC m=+1317.818538726" lastFinishedPulling="2026-01-30 12:18:10.373708227 +0000 UTC m=+1326.151529871" observedRunningTime="2026-01-30 12:18:14.5106685 +0000 UTC m=+1330.288490174" watchObservedRunningTime="2026-01-30 12:18:14.527787031 +0000 UTC m=+1330.305608685" Jan 30 12:18:14 crc kubenswrapper[4703]: I0130 12:18:14.951775 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-hjndc"] Jan 30 12:18:14 crc kubenswrapper[4703]: I0130 12:18:14.954076 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:14 crc kubenswrapper[4703]: I0130 12:18:14.961283 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Jan 30 12:18:14 crc kubenswrapper[4703]: I0130 12:18:14.974576 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-hjndc"] Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.108599 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-config\") pod \"dnsmasq-dns-764c5664d7-hjndc\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.108670 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-hjndc\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.108704 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t4zgz\" (UniqueName: \"kubernetes.io/projected/bea2808f-b06b-4388-865f-9cac9ca53857-kube-api-access-t4zgz\") pod \"dnsmasq-dns-764c5664d7-hjndc\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.108767 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-dns-svc\") pod \"dnsmasq-dns-764c5664d7-hjndc\" (UID: 
\"bea2808f-b06b-4388-865f-9cac9ca53857\") " pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.108821 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-hjndc\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.108849 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-hjndc\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.176981 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-75krj"] Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.210780 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-dns-svc\") pod \"dnsmasq-dns-764c5664d7-hjndc\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.210933 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-hjndc\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.210972 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-hjndc\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.211079 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-config\") pod \"dnsmasq-dns-764c5664d7-hjndc\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.211101 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-hjndc\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.211147 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t4zgz\" (UniqueName: \"kubernetes.io/projected/bea2808f-b06b-4388-865f-9cac9ca53857-kube-api-access-t4zgz\") pod \"dnsmasq-dns-764c5664d7-hjndc\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.214783 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" 
(UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-dns-svc\") pod \"dnsmasq-dns-764c5664d7-hjndc\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.218310 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-config\") pod \"dnsmasq-dns-764c5664d7-hjndc\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.218880 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-ovsdbserver-sb\") pod \"dnsmasq-dns-764c5664d7-hjndc\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.218890 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-ovsdbserver-nb\") pod \"dnsmasq-dns-764c5664d7-hjndc\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.232163 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-dns-swift-storage-0\") pod \"dnsmasq-dns-764c5664d7-hjndc\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.250055 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t4zgz\" (UniqueName: \"kubernetes.io/projected/bea2808f-b06b-4388-865f-9cac9ca53857-kube-api-access-t4zgz\") pod \"dnsmasq-dns-764c5664d7-hjndc\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.360218 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-c990-account-create-update-hr75h"] Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.391887 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-njb8f"] Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.483603 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.544755 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-6qgph"] Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.546446 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-njb8f" event={"ID":"6d79ec4e-4ab2-4be5-8682-ad1d8aeea327","Type":"ContainerStarted","Data":"35e39245c3ed629d091ff45db23aa8aa7a22519ce11e174daa256216aeb23aa1"} Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.557181 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-75krj" event={"ID":"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76","Type":"ContainerStarted","Data":"353d471835e04cc2ae2a005a26409252def08cbbbfa8dcce13017f7cfc9d642e"} Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.560088 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-c990-account-create-update-hr75h" event={"ID":"24145d6b-2acd-413a-8305-a295030ebe1f","Type":"ContainerStarted","Data":"f2171885da20415337154696cb3a005f44b4ea0af0f51b873666d5f8fd0fabd1"} Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.589478 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-db-sync-vhshs"] Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.615648 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-b4l7b"] Jan 30 12:18:15 crc kubenswrapper[4703]: W0130 12:18:15.622506 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf4a552c6_d8b7_470e_aaae_01d7d29d9cb5.slice/crio-1b2c1b89e789fa735472c51ce6ad9bbb59fca12b2fc4a8d3277a09e416a2d9f7 WatchSource:0}: Error finding container 1b2c1b89e789fa735472c51ce6ad9bbb59fca12b2fc4a8d3277a09e416a2d9f7: Status 404 returned error can't find the container with id 1b2c1b89e789fa735472c51ce6ad9bbb59fca12b2fc4a8d3277a09e416a2d9f7 Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.645696 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-654f-account-create-update-w7fdk"] Jan 30 12:18:15 crc kubenswrapper[4703]: I0130 12:18:15.654647 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-9239-account-create-update-rd2pp"] Jan 30 12:18:15 crc kubenswrapper[4703]: W0130 12:18:15.669626 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfe0648b2_c4f9_47dd_bf18_e00e2958c243.slice/crio-c8c707d90ad35c5f8bf3ae41c341a7745ca6b533587b155f879fad5783db4174 WatchSource:0}: Error finding container c8c707d90ad35c5f8bf3ae41c341a7745ca6b533587b155f879fad5783db4174: Status 404 returned error can't find the container with id c8c707d90ad35c5f8bf3ae41c341a7745ca6b533587b155f879fad5783db4174 Jan 30 12:18:16 crc kubenswrapper[4703]: I0130 12:18:16.578650 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-vhshs" event={"ID":"cd3bf27b-46bc-468e-8735-3a3ed6eda272","Type":"ContainerStarted","Data":"3732b114ad4c2bda7860ef9f2f8057f29fff0cb8e46d0eb1d9e10ece925eb8ff"} Jan 30 12:18:16 crc kubenswrapper[4703]: I0130 12:18:16.589225 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-6qgph" event={"ID":"fe0648b2-c4f9-47dd-bf18-e00e2958c243","Type":"ContainerStarted","Data":"c8c707d90ad35c5f8bf3ae41c341a7745ca6b533587b155f879fad5783db4174"} Jan 30 
12:18:16 crc kubenswrapper[4703]: I0130 12:18:16.590714 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-9239-account-create-update-rd2pp" event={"ID":"5f7deb02-fe54-403e-b67a-45e8d8e62a62","Type":"ContainerStarted","Data":"ce477f7e1035a07d7ad2e77802849b63fcd6afe30cd1941f056d7265c8c22434"} Jan 30 12:18:16 crc kubenswrapper[4703]: I0130 12:18:16.593362 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-654f-account-create-update-w7fdk" event={"ID":"f4a552c6-d8b7-470e-aaae-01d7d29d9cb5","Type":"ContainerStarted","Data":"1b2c1b89e789fa735472c51ce6ad9bbb59fca12b2fc4a8d3277a09e416a2d9f7"} Jan 30 12:18:16 crc kubenswrapper[4703]: I0130 12:18:16.594246 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-b4l7b" event={"ID":"23b1be5c-b917-4633-b207-9dfeb79ebadc","Type":"ContainerStarted","Data":"7c988be470e7a84b1269aed1ed5c3956e7b56cd090c012294d44ad4f871ea84d"} Jan 30 12:18:16 crc kubenswrapper[4703]: I0130 12:18:16.890549 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-hjndc"] Jan 30 12:18:17 crc kubenswrapper[4703]: I0130 12:18:17.616188 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-hrpzm" event={"ID":"3da2d370-06f5-4fcc-b58e-2676657e6e85","Type":"ContainerStarted","Data":"b9ee7795f51ad91aa101278413e3e09d2b7e685ca655ad1c5f939e17faffef58"} Jan 30 12:18:17 crc kubenswrapper[4703]: I0130 12:18:17.629209 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-9239-account-create-update-rd2pp" event={"ID":"5f7deb02-fe54-403e-b67a-45e8d8e62a62","Type":"ContainerStarted","Data":"b39cce2491b0c477dd40fa0817ad89e87c53df3a8f93e40dddeac51360bf2021"} Jan 30 12:18:17 crc kubenswrapper[4703]: I0130 12:18:17.640816 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-hrpzm" podStartSLOduration=4.438610237 podStartE2EDuration="41.640796949s" podCreationTimestamp="2026-01-30 12:17:36 +0000 UTC" firstStartedPulling="2026-01-30 12:17:37.393200418 +0000 UTC m=+1293.171022072" lastFinishedPulling="2026-01-30 12:18:14.59538713 +0000 UTC m=+1330.373208784" observedRunningTime="2026-01-30 12:18:17.640493141 +0000 UTC m=+1333.418314795" watchObservedRunningTime="2026-01-30 12:18:17.640796949 +0000 UTC m=+1333.418618603" Jan 30 12:18:17 crc kubenswrapper[4703]: I0130 12:18:17.650820 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-654f-account-create-update-w7fdk" event={"ID":"f4a552c6-d8b7-470e-aaae-01d7d29d9cb5","Type":"ContainerStarted","Data":"19f2d4d421c4124ccfcc7c381bcca94056f61f6d818cb1b84c1eaba6271260ea"} Jan 30 12:18:17 crc kubenswrapper[4703]: I0130 12:18:17.673336 4703 generic.go:334] "Generic (PLEG): container finished" podID="23b1be5c-b917-4633-b207-9dfeb79ebadc" containerID="652385ce09399001b04ffc1823d82d1425e460e9ca7665e4b2b3e4d3e61115a6" exitCode=0 Jan 30 12:18:17 crc kubenswrapper[4703]: I0130 12:18:17.673489 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-b4l7b" event={"ID":"23b1be5c-b917-4633-b207-9dfeb79ebadc","Type":"ContainerDied","Data":"652385ce09399001b04ffc1823d82d1425e460e9ca7665e4b2b3e4d3e61115a6"} Jan 30 12:18:17 crc kubenswrapper[4703]: I0130 12:18:17.682834 4703 generic.go:334] "Generic (PLEG): container finished" podID="24145d6b-2acd-413a-8305-a295030ebe1f" containerID="2683e841d3c3fb7cd7de6acfce779ba1263a4ee4512411ec956ea6c89217e13b" exitCode=0 Jan 30 12:18:17 crc 
Jan 30 12:18:17 crc kubenswrapper[4703]: I0130 12:18:17.687543 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-hjndc" event={"ID":"bea2808f-b06b-4388-865f-9cac9ca53857","Type":"ContainerStarted","Data":"881d63959d37b039010b03fac3aae6db1de00b580950b3c6aeaa73e0435a1deb"}
Jan 30 12:18:17 crc kubenswrapper[4703]: I0130 12:18:17.688138 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-hjndc" event={"ID":"bea2808f-b06b-4388-865f-9cac9ca53857","Type":"ContainerStarted","Data":"c48d230221ff732c55713b4c37c1e074b3d37a47e08d3a1ef0ea885528c934cf"}
Jan 30 12:18:17 crc kubenswrapper[4703]: I0130 12:18:17.701113 4703 generic.go:334] "Generic (PLEG): container finished" podID="fe0648b2-c4f9-47dd-bf18-e00e2958c243" containerID="af5700bcd05249811d07e7aa4a70ed9eb251484b5acb22fd8ec6461ec3009b1b" exitCode=0
Jan 30 12:18:17 crc kubenswrapper[4703]: I0130 12:18:17.701238 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-6qgph" event={"ID":"fe0648b2-c4f9-47dd-bf18-e00e2958c243","Type":"ContainerDied","Data":"af5700bcd05249811d07e7aa4a70ed9eb251484b5acb22fd8ec6461ec3009b1b"}
Jan 30 12:18:17 crc kubenswrapper[4703]: I0130 12:18:17.705357 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-654f-account-create-update-w7fdk" podStartSLOduration=4.705334416 podStartE2EDuration="4.705334416s" podCreationTimestamp="2026-01-30 12:18:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:18:17.691871914 +0000 UTC m=+1333.469693568" watchObservedRunningTime="2026-01-30 12:18:17.705334416 +0000 UTC m=+1333.483156070"
Jan 30 12:18:17 crc kubenswrapper[4703]: I0130 12:18:17.705528 4703 generic.go:334] "Generic (PLEG): container finished" podID="6d79ec4e-4ab2-4be5-8682-ad1d8aeea327" containerID="fb86633033de415e0881625c9d147999160c01808552e9ddb659a3d7908a03a8" exitCode=0
Jan 30 12:18:17 crc kubenswrapper[4703]: I0130 12:18:17.705602 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-njb8f" event={"ID":"6d79ec4e-4ab2-4be5-8682-ad1d8aeea327","Type":"ContainerDied","Data":"fb86633033de415e0881625c9d147999160c01808552e9ddb659a3d7908a03a8"}
Jan 30 12:18:17 crc kubenswrapper[4703]: I0130 12:18:17.716536 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-9239-account-create-update-rd2pp" podStartSLOduration=4.7148801030000005 podStartE2EDuration="4.714880103s" podCreationTimestamp="2026-01-30 12:18:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:18:17.670599611 +0000 UTC m=+1333.448421275" watchObservedRunningTime="2026-01-30 12:18:17.714880103 +0000 UTC m=+1333.492701767"
Jan 30 12:18:18 crc kubenswrapper[4703]: I0130 12:18:18.724814 4703 generic.go:334] "Generic (PLEG): container finished" podID="5f7deb02-fe54-403e-b67a-45e8d8e62a62" containerID="b39cce2491b0c477dd40fa0817ad89e87c53df3a8f93e40dddeac51360bf2021" exitCode=0
Jan 30 12:18:18 crc kubenswrapper[4703]: I0130 12:18:18.724953 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-9239-account-create-update-rd2pp" event={"ID":"5f7deb02-fe54-403e-b67a-45e8d8e62a62","Type":"ContainerDied","Data":"b39cce2491b0c477dd40fa0817ad89e87c53df3a8f93e40dddeac51360bf2021"}
Jan 30 12:18:18 crc kubenswrapper[4703]: I0130 12:18:18.734091 4703 generic.go:334] "Generic (PLEG): container finished" podID="f4a552c6-d8b7-470e-aaae-01d7d29d9cb5" containerID="19f2d4d421c4124ccfcc7c381bcca94056f61f6d818cb1b84c1eaba6271260ea" exitCode=0
Jan 30 12:18:18 crc kubenswrapper[4703]: I0130 12:18:18.734250 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-654f-account-create-update-w7fdk" event={"ID":"f4a552c6-d8b7-470e-aaae-01d7d29d9cb5","Type":"ContainerDied","Data":"19f2d4d421c4124ccfcc7c381bcca94056f61f6d818cb1b84c1eaba6271260ea"}
Jan 30 12:18:18 crc kubenswrapper[4703]: I0130 12:18:18.745900 4703 generic.go:334] "Generic (PLEG): container finished" podID="bea2808f-b06b-4388-865f-9cac9ca53857" containerID="881d63959d37b039010b03fac3aae6db1de00b580950b3c6aeaa73e0435a1deb" exitCode=0
Jan 30 12:18:18 crc kubenswrapper[4703]: I0130 12:18:18.746572 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-hjndc" event={"ID":"bea2808f-b06b-4388-865f-9cac9ca53857","Type":"ContainerDied","Data":"881d63959d37b039010b03fac3aae6db1de00b580950b3c6aeaa73e0435a1deb"}
Jan 30 12:18:18 crc kubenswrapper[4703]: I0130 12:18:18.746657 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-hjndc" event={"ID":"bea2808f-b06b-4388-865f-9cac9ca53857","Type":"ContainerStarted","Data":"2afe21d8bff3648d5aadae798fd8612bf1d4bd06128b70afbd57129731c29508"}
Jan 30 12:18:18 crc kubenswrapper[4703]: I0130 12:18:18.747418 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-764c5664d7-hjndc"
Jan 30 12:18:18 crc kubenswrapper[4703]: I0130 12:18:18.814899 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-764c5664d7-hjndc" podStartSLOduration=4.814868883 podStartE2EDuration="4.814868883s" podCreationTimestamp="2026-01-30 12:18:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:18:18.797913217 +0000 UTC m=+1334.575734871" watchObservedRunningTime="2026-01-30 12:18:18.814868883 +0000 UTC m=+1334.592690537"
Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.110847 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-6qgph" event={"ID":"fe0648b2-c4f9-47dd-bf18-e00e2958c243","Type":"ContainerDied","Data":"c8c707d90ad35c5f8bf3ae41c341a7745ca6b533587b155f879fad5783db4174"}
Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.111210 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c8c707d90ad35c5f8bf3ae41c341a7745ca6b533587b155f879fad5783db4174"
Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.121978 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-6qgph"
Need to start a new one" pod="openstack/barbican-db-create-6qgph" Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.173196 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fe0648b2-c4f9-47dd-bf18-e00e2958c243-operator-scripts\") pod \"fe0648b2-c4f9-47dd-bf18-e00e2958c243\" (UID: \"fe0648b2-c4f9-47dd-bf18-e00e2958c243\") " Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.173344 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5c9rr\" (UniqueName: \"kubernetes.io/projected/fe0648b2-c4f9-47dd-bf18-e00e2958c243-kube-api-access-5c9rr\") pod \"fe0648b2-c4f9-47dd-bf18-e00e2958c243\" (UID: \"fe0648b2-c4f9-47dd-bf18-e00e2958c243\") " Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.175467 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe0648b2-c4f9-47dd-bf18-e00e2958c243-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fe0648b2-c4f9-47dd-bf18-e00e2958c243" (UID: "fe0648b2-c4f9-47dd-bf18-e00e2958c243"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.199748 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe0648b2-c4f9-47dd-bf18-e00e2958c243-kube-api-access-5c9rr" (OuterVolumeSpecName: "kube-api-access-5c9rr") pod "fe0648b2-c4f9-47dd-bf18-e00e2958c243" (UID: "fe0648b2-c4f9-47dd-bf18-e00e2958c243"). InnerVolumeSpecName "kube-api-access-5c9rr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.277163 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5c9rr\" (UniqueName: \"kubernetes.io/projected/fe0648b2-c4f9-47dd-bf18-e00e2958c243-kube-api-access-5c9rr\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.277204 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fe0648b2-c4f9-47dd-bf18-e00e2958c243-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.558306 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-njb8f" Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.586202 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vs8b4\" (UniqueName: \"kubernetes.io/projected/6d79ec4e-4ab2-4be5-8682-ad1d8aeea327-kube-api-access-vs8b4\") pod \"6d79ec4e-4ab2-4be5-8682-ad1d8aeea327\" (UID: \"6d79ec4e-4ab2-4be5-8682-ad1d8aeea327\") " Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.586310 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d79ec4e-4ab2-4be5-8682-ad1d8aeea327-operator-scripts\") pod \"6d79ec4e-4ab2-4be5-8682-ad1d8aeea327\" (UID: \"6d79ec4e-4ab2-4be5-8682-ad1d8aeea327\") " Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.588053 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d79ec4e-4ab2-4be5-8682-ad1d8aeea327-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6d79ec4e-4ab2-4be5-8682-ad1d8aeea327" (UID: "6d79ec4e-4ab2-4be5-8682-ad1d8aeea327"). 
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.620655 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d79ec4e-4ab2-4be5-8682-ad1d8aeea327-kube-api-access-vs8b4" (OuterVolumeSpecName: "kube-api-access-vs8b4") pod "6d79ec4e-4ab2-4be5-8682-ad1d8aeea327" (UID: "6d79ec4e-4ab2-4be5-8682-ad1d8aeea327"). InnerVolumeSpecName "kube-api-access-vs8b4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.691737 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vs8b4\" (UniqueName: \"kubernetes.io/projected/6d79ec4e-4ab2-4be5-8682-ad1d8aeea327-kube-api-access-vs8b4\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.691796 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d79ec4e-4ab2-4be5-8682-ad1d8aeea327-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.840264 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-b4l7b" Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.863803 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-c990-account-create-update-hr75h" Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.898752 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-9239-account-create-update-rd2pp" Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.984075 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-654f-account-create-update-w7fdk" Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.997981 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5f7deb02-fe54-403e-b67a-45e8d8e62a62-operator-scripts\") pod \"5f7deb02-fe54-403e-b67a-45e8d8e62a62\" (UID: \"5f7deb02-fe54-403e-b67a-45e8d8e62a62\") " Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.998081 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6k86\" (UniqueName: \"kubernetes.io/projected/5f7deb02-fe54-403e-b67a-45e8d8e62a62-kube-api-access-j6k86\") pod \"5f7deb02-fe54-403e-b67a-45e8d8e62a62\" (UID: \"5f7deb02-fe54-403e-b67a-45e8d8e62a62\") " Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.998139 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24145d6b-2acd-413a-8305-a295030ebe1f-operator-scripts\") pod \"24145d6b-2acd-413a-8305-a295030ebe1f\" (UID: \"24145d6b-2acd-413a-8305-a295030ebe1f\") " Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.998163 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vcbxs\" (UniqueName: \"kubernetes.io/projected/24145d6b-2acd-413a-8305-a295030ebe1f-kube-api-access-vcbxs\") pod \"24145d6b-2acd-413a-8305-a295030ebe1f\" (UID: \"24145d6b-2acd-413a-8305-a295030ebe1f\") " Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.998215 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/23b1be5c-b917-4633-b207-9dfeb79ebadc-operator-scripts\") pod \"23b1be5c-b917-4633-b207-9dfeb79ebadc\" (UID: \"23b1be5c-b917-4633-b207-9dfeb79ebadc\") " Jan 30 12:18:20 crc kubenswrapper[4703]: I0130 12:18:20.998332 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vhnmk\" (UniqueName: \"kubernetes.io/projected/23b1be5c-b917-4633-b207-9dfeb79ebadc-kube-api-access-vhnmk\") pod \"23b1be5c-b917-4633-b207-9dfeb79ebadc\" (UID: \"23b1be5c-b917-4633-b207-9dfeb79ebadc\") " Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.002263 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24145d6b-2acd-413a-8305-a295030ebe1f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "24145d6b-2acd-413a-8305-a295030ebe1f" (UID: "24145d6b-2acd-413a-8305-a295030ebe1f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.002925 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f7deb02-fe54-403e-b67a-45e8d8e62a62-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5f7deb02-fe54-403e-b67a-45e8d8e62a62" (UID: "5f7deb02-fe54-403e-b67a-45e8d8e62a62"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.003524 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23b1be5c-b917-4633-b207-9dfeb79ebadc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "23b1be5c-b917-4633-b207-9dfeb79ebadc" (UID: "23b1be5c-b917-4633-b207-9dfeb79ebadc"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.012244 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23b1be5c-b917-4633-b207-9dfeb79ebadc-kube-api-access-vhnmk" (OuterVolumeSpecName: "kube-api-access-vhnmk") pod "23b1be5c-b917-4633-b207-9dfeb79ebadc" (UID: "23b1be5c-b917-4633-b207-9dfeb79ebadc"). InnerVolumeSpecName "kube-api-access-vhnmk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.013286 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f7deb02-fe54-403e-b67a-45e8d8e62a62-kube-api-access-j6k86" (OuterVolumeSpecName: "kube-api-access-j6k86") pod "5f7deb02-fe54-403e-b67a-45e8d8e62a62" (UID: "5f7deb02-fe54-403e-b67a-45e8d8e62a62"). InnerVolumeSpecName "kube-api-access-j6k86". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.014212 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24145d6b-2acd-413a-8305-a295030ebe1f-kube-api-access-vcbxs" (OuterVolumeSpecName: "kube-api-access-vcbxs") pod "24145d6b-2acd-413a-8305-a295030ebe1f" (UID: "24145d6b-2acd-413a-8305-a295030ebe1f"). InnerVolumeSpecName "kube-api-access-vcbxs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.014883 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vhnmk\" (UniqueName: \"kubernetes.io/projected/23b1be5c-b917-4633-b207-9dfeb79ebadc-kube-api-access-vhnmk\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.014922 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5f7deb02-fe54-403e-b67a-45e8d8e62a62-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.014936 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6k86\" (UniqueName: \"kubernetes.io/projected/5f7deb02-fe54-403e-b67a-45e8d8e62a62-kube-api-access-j6k86\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.014950 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/24145d6b-2acd-413a-8305-a295030ebe1f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.014968 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/23b1be5c-b917-4633-b207-9dfeb79ebadc-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.115937 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4czxv\" (UniqueName: \"kubernetes.io/projected/f4a552c6-d8b7-470e-aaae-01d7d29d9cb5-kube-api-access-4czxv\") pod \"f4a552c6-d8b7-470e-aaae-01d7d29d9cb5\" (UID: \"f4a552c6-d8b7-470e-aaae-01d7d29d9cb5\") " Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.116024 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4a552c6-d8b7-470e-aaae-01d7d29d9cb5-operator-scripts\") pod \"f4a552c6-d8b7-470e-aaae-01d7d29d9cb5\" (UID: \"f4a552c6-d8b7-470e-aaae-01d7d29d9cb5\") " Jan 
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.116864 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vcbxs\" (UniqueName: \"kubernetes.io/projected/24145d6b-2acd-413a-8305-a295030ebe1f-kube-api-access-vcbxs\") on node \"crc\" DevicePath \"\""
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.117390 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4a552c6-d8b7-470e-aaae-01d7d29d9cb5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f4a552c6-d8b7-470e-aaae-01d7d29d9cb5" (UID: "f4a552c6-d8b7-470e-aaae-01d7d29d9cb5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.122839 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4a552c6-d8b7-470e-aaae-01d7d29d9cb5-kube-api-access-4czxv" (OuterVolumeSpecName: "kube-api-access-4czxv") pod "f4a552c6-d8b7-470e-aaae-01d7d29d9cb5" (UID: "f4a552c6-d8b7-470e-aaae-01d7d29d9cb5"). InnerVolumeSpecName "kube-api-access-4czxv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.140975 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-9239-account-create-update-rd2pp" event={"ID":"5f7deb02-fe54-403e-b67a-45e8d8e62a62","Type":"ContainerDied","Data":"ce477f7e1035a07d7ad2e77802849b63fcd6afe30cd1941f056d7265c8c22434"}
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.141478 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce477f7e1035a07d7ad2e77802849b63fcd6afe30cd1941f056d7265c8c22434"
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.141325 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-9239-account-create-update-rd2pp"
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.145150 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-654f-account-create-update-w7fdk" event={"ID":"f4a552c6-d8b7-470e-aaae-01d7d29d9cb5","Type":"ContainerDied","Data":"1b2c1b89e789fa735472c51ce6ad9bbb59fca12b2fc4a8d3277a09e416a2d9f7"}
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.145219 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1b2c1b89e789fa735472c51ce6ad9bbb59fca12b2fc4a8d3277a09e416a2d9f7"
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.145173 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-654f-account-create-update-w7fdk"
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.147251 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-b4l7b" event={"ID":"23b1be5c-b917-4633-b207-9dfeb79ebadc","Type":"ContainerDied","Data":"7c988be470e7a84b1269aed1ed5c3956e7b56cd090c012294d44ad4f871ea84d"}
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.147285 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c988be470e7a84b1269aed1ed5c3956e7b56cd090c012294d44ad4f871ea84d"
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.147362 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-b4l7b"
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.154451 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-c990-account-create-update-hr75h" event={"ID":"24145d6b-2acd-413a-8305-a295030ebe1f","Type":"ContainerDied","Data":"f2171885da20415337154696cb3a005f44b4ea0af0f51b873666d5f8fd0fabd1"}
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.154504 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f2171885da20415337154696cb3a005f44b4ea0af0f51b873666d5f8fd0fabd1"
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.154596 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-c990-account-create-update-hr75h"
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.159654 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-6qgph"
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.160239 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-njb8f"
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.160585 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-njb8f" event={"ID":"6d79ec4e-4ab2-4be5-8682-ad1d8aeea327","Type":"ContainerDied","Data":"35e39245c3ed629d091ff45db23aa8aa7a22519ce11e174daa256216aeb23aa1"}
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.160707 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="35e39245c3ed629d091ff45db23aa8aa7a22519ce11e174daa256216aeb23aa1"
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.219604 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4czxv\" (UniqueName: \"kubernetes.io/projected/f4a552c6-d8b7-470e-aaae-01d7d29d9cb5-kube-api-access-4czxv\") on node \"crc\" DevicePath \"\""
Jan 30 12:18:21 crc kubenswrapper[4703]: I0130 12:18:21.219665 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4a552c6-d8b7-470e-aaae-01d7d29d9cb5-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 30 12:18:23 crc kubenswrapper[4703]: I0130 12:18:23.192430 4703 generic.go:334] "Generic (PLEG): container finished" podID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerID="10e0bcb02fd9e626b5845d848fc0133efa1c70a29ef4dadf9b2cbb9c1160d864" exitCode=0
Jan 30 12:18:23 crc kubenswrapper[4703]: I0130 12:18:23.192510 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7a919f37-730e-42a4-848f-ae5b2096b2d2","Type":"ContainerDied","Data":"10e0bcb02fd9e626b5845d848fc0133efa1c70a29ef4dadf9b2cbb9c1160d864"}
Jan 30 12:18:25 crc kubenswrapper[4703]: I0130 12:18:25.546466 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-764c5664d7-hjndc"
Jan 30 12:18:25 crc kubenswrapper[4703]: I0130 12:18:25.631534 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-9gqcb"]
Jan 30 12:18:25 crc kubenswrapper[4703]: I0130 12:18:25.631903 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-9gqcb" podUID="5f0bf583-356b-4550-9257-ec624b1e5de1" containerName="dnsmasq-dns" containerID="cri-o://8b75261f5087658d6065c79dbc80a36cab75a7635d5a2e56e959769b10d384e2" gracePeriod=10
Jan 30 12:18:26 crc kubenswrapper[4703]: I0130 12:18:26.232571 4703 generic.go:334] "Generic (PLEG): container finished" podID="5f0bf583-356b-4550-9257-ec624b1e5de1" containerID="8b75261f5087658d6065c79dbc80a36cab75a7635d5a2e56e959769b10d384e2" exitCode=0
Jan 30 12:18:26 crc kubenswrapper[4703]: I0130 12:18:26.232983 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-9gqcb" event={"ID":"5f0bf583-356b-4550-9257-ec624b1e5de1","Type":"ContainerDied","Data":"8b75261f5087658d6065c79dbc80a36cab75a7635d5a2e56e959769b10d384e2"}
Jan 30 12:18:28 crc kubenswrapper[4703]: I0130 12:18:28.500902 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-9gqcb" podUID="5f0bf583-356b-4550-9257-ec624b1e5de1" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.129:5353: connect: connection refused"
Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.328188 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-9gqcb" event={"ID":"5f0bf583-356b-4550-9257-ec624b1e5de1","Type":"ContainerDied","Data":"4b2905208fb3c0ca11df0f33521bab89f8a9217d17f8b2b547ac8b2fc0b23fff"}
Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.329107 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4b2905208fb3c0ca11df0f33521bab89f8a9217d17f8b2b547ac8b2fc0b23fff"
Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.331996 4703 generic.go:334] "Generic (PLEG): container finished" podID="3da2d370-06f5-4fcc-b58e-2676657e6e85" containerID="b9ee7795f51ad91aa101278413e3e09d2b7e685ca655ad1c5f939e17faffef58" exitCode=0
Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.332035 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-hrpzm" event={"ID":"3da2d370-06f5-4fcc-b58e-2676657e6e85","Type":"ContainerDied","Data":"b9ee7795f51ad91aa101278413e3e09d2b7e685ca655ad1c5f939e17faffef58"}
Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.386021 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-9gqcb"
Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.499600 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-ovsdbserver-nb\") pod \"5f0bf583-356b-4550-9257-ec624b1e5de1\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") "
Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.499685 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8qsc9\" (UniqueName: \"kubernetes.io/projected/5f0bf583-356b-4550-9257-ec624b1e5de1-kube-api-access-8qsc9\") pod \"5f0bf583-356b-4550-9257-ec624b1e5de1\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") "
Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.499720 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-config\") pod \"5f0bf583-356b-4550-9257-ec624b1e5de1\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") "
Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.499757 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-dns-svc\") pod \"5f0bf583-356b-4550-9257-ec624b1e5de1\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") "
Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.499963 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-ovsdbserver-sb\") pod \"5f0bf583-356b-4550-9257-ec624b1e5de1\" (UID: \"5f0bf583-356b-4550-9257-ec624b1e5de1\") "
Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.509519 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f0bf583-356b-4550-9257-ec624b1e5de1-kube-api-access-8qsc9" (OuterVolumeSpecName: "kube-api-access-8qsc9") pod "5f0bf583-356b-4550-9257-ec624b1e5de1" (UID: "5f0bf583-356b-4550-9257-ec624b1e5de1"). InnerVolumeSpecName "kube-api-access-8qsc9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.553738 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-config" (OuterVolumeSpecName: "config") pod "5f0bf583-356b-4550-9257-ec624b1e5de1" (UID: "5f0bf583-356b-4550-9257-ec624b1e5de1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.559057 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5f0bf583-356b-4550-9257-ec624b1e5de1" (UID: "5f0bf583-356b-4550-9257-ec624b1e5de1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.564276 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5f0bf583-356b-4550-9257-ec624b1e5de1" (UID: "5f0bf583-356b-4550-9257-ec624b1e5de1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.569661 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5f0bf583-356b-4550-9257-ec624b1e5de1" (UID: "5f0bf583-356b-4550-9257-ec624b1e5de1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.602988 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.603034 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.603048 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8qsc9\" (UniqueName: \"kubernetes.io/projected/5f0bf583-356b-4550-9257-ec624b1e5de1-kube-api-access-8qsc9\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.603061 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:34 crc kubenswrapper[4703]: I0130 12:18:34.603070 4703 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5f0bf583-356b-4550-9257-ec624b1e5de1-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:35 crc kubenswrapper[4703]: I0130 12:18:35.344221 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-9gqcb" Jan 30 12:18:35 crc kubenswrapper[4703]: I0130 12:18:35.382820 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-9gqcb"] Jan 30 12:18:35 crc kubenswrapper[4703]: I0130 12:18:35.392197 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-9gqcb"] Jan 30 12:18:37 crc kubenswrapper[4703]: I0130 12:18:37.101003 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f0bf583-356b-4550-9257-ec624b1e5de1" path="/var/lib/kubelet/pods/5f0bf583-356b-4550-9257-ec624b1e5de1/volumes" Jan 30 12:18:38 crc kubenswrapper[4703]: I0130 12:18:38.501872 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-9gqcb" podUID="5f0bf583-356b-4550-9257-ec624b1e5de1" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.129:5353: i/o timeout" Jan 30 12:18:39 crc kubenswrapper[4703]: I0130 12:18:39.605442 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-hrpzm" Jan 30 12:18:39 crc kubenswrapper[4703]: I0130 12:18:39.614607 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p6tfg\" (UniqueName: \"kubernetes.io/projected/3da2d370-06f5-4fcc-b58e-2676657e6e85-kube-api-access-p6tfg\") pod \"3da2d370-06f5-4fcc-b58e-2676657e6e85\" (UID: \"3da2d370-06f5-4fcc-b58e-2676657e6e85\") " Jan 30 12:18:39 crc kubenswrapper[4703]: I0130 12:18:39.614677 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3da2d370-06f5-4fcc-b58e-2676657e6e85-config-data\") pod \"3da2d370-06f5-4fcc-b58e-2676657e6e85\" (UID: \"3da2d370-06f5-4fcc-b58e-2676657e6e85\") " Jan 30 12:18:39 crc kubenswrapper[4703]: I0130 12:18:39.614708 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3da2d370-06f5-4fcc-b58e-2676657e6e85-db-sync-config-data\") pod \"3da2d370-06f5-4fcc-b58e-2676657e6e85\" (UID: \"3da2d370-06f5-4fcc-b58e-2676657e6e85\") " Jan 30 12:18:39 crc kubenswrapper[4703]: I0130 12:18:39.614941 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3da2d370-06f5-4fcc-b58e-2676657e6e85-combined-ca-bundle\") pod \"3da2d370-06f5-4fcc-b58e-2676657e6e85\" (UID: \"3da2d370-06f5-4fcc-b58e-2676657e6e85\") " Jan 30 12:18:39 crc kubenswrapper[4703]: I0130 12:18:39.623027 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3da2d370-06f5-4fcc-b58e-2676657e6e85-kube-api-access-p6tfg" (OuterVolumeSpecName: "kube-api-access-p6tfg") pod "3da2d370-06f5-4fcc-b58e-2676657e6e85" (UID: "3da2d370-06f5-4fcc-b58e-2676657e6e85"). InnerVolumeSpecName "kube-api-access-p6tfg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:18:39 crc kubenswrapper[4703]: I0130 12:18:39.623780 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3da2d370-06f5-4fcc-b58e-2676657e6e85-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "3da2d370-06f5-4fcc-b58e-2676657e6e85" (UID: "3da2d370-06f5-4fcc-b58e-2676657e6e85"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:18:39 crc kubenswrapper[4703]: I0130 12:18:39.649310 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3da2d370-06f5-4fcc-b58e-2676657e6e85-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3da2d370-06f5-4fcc-b58e-2676657e6e85" (UID: "3da2d370-06f5-4fcc-b58e-2676657e6e85"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:18:39 crc kubenswrapper[4703]: I0130 12:18:39.682936 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3da2d370-06f5-4fcc-b58e-2676657e6e85-config-data" (OuterVolumeSpecName: "config-data") pod "3da2d370-06f5-4fcc-b58e-2676657e6e85" (UID: "3da2d370-06f5-4fcc-b58e-2676657e6e85"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:18:39 crc kubenswrapper[4703]: I0130 12:18:39.717985 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3da2d370-06f5-4fcc-b58e-2676657e6e85-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:39 crc kubenswrapper[4703]: I0130 12:18:39.718049 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p6tfg\" (UniqueName: \"kubernetes.io/projected/3da2d370-06f5-4fcc-b58e-2676657e6e85-kube-api-access-p6tfg\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:39 crc kubenswrapper[4703]: I0130 12:18:39.718073 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3da2d370-06f5-4fcc-b58e-2676657e6e85-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:39 crc kubenswrapper[4703]: I0130 12:18:39.718090 4703 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/3da2d370-06f5-4fcc-b58e-2676657e6e85-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:40 crc kubenswrapper[4703]: E0130 12:18:40.197790 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.129.56.193:5001/podified-epoxy-centos9/openstack-watcher-api:watcher_latest" Jan 30 12:18:40 crc kubenswrapper[4703]: E0130 12:18:40.198302 4703 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="38.129.56.193:5001/podified-epoxy-centos9/openstack-watcher-api:watcher_latest" Jan 30 12:18:40 crc kubenswrapper[4703]: E0130 12:18:40.198682 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:watcher-db-sync,Image:38.129.56.193:5001/podified-epoxy-centos9/openstack-watcher-api:watcher_latest,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/watcher/watcher.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:watcher-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b5jdv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-db-sync-vhshs_openstack(cd3bf27b-46bc-468e-8735-3a3ed6eda272): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:18:40 crc kubenswrapper[4703]: E0130 12:18:40.201319 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/watcher-db-sync-vhshs" podUID="cd3bf27b-46bc-468e-8735-3a3ed6eda272" Jan 30 12:18:40 crc kubenswrapper[4703]: I0130 12:18:40.397538 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7a919f37-730e-42a4-848f-ae5b2096b2d2","Type":"ContainerStarted","Data":"b93716e0d37b866137d69aaac757b996589738dfddc32139ac80f5192065268c"} Jan 30 12:18:40 crc kubenswrapper[4703]: I0130 12:18:40.399438 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-hrpzm" event={"ID":"3da2d370-06f5-4fcc-b58e-2676657e6e85","Type":"ContainerDied","Data":"5550da3e1d2d27d9cc47f2560f39079093d4b5a2b126aebbb16dddd4de98a4ef"} Jan 30 12:18:40 crc kubenswrapper[4703]: I0130 12:18:40.399471 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5550da3e1d2d27d9cc47f2560f39079093d4b5a2b126aebbb16dddd4de98a4ef" Jan 30 12:18:40 crc kubenswrapper[4703]: I0130 12:18:40.399543 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-hrpzm" Jan 30 12:18:40 crc kubenswrapper[4703]: I0130 12:18:40.410371 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-75krj" event={"ID":"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76","Type":"ContainerStarted","Data":"ac6c7fe606c25265e0a7610c2c63533975819202f2cfd4cf4cd4188f28779a0f"} Jan 30 12:18:40 crc kubenswrapper[4703]: E0130 12:18:40.412109 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"watcher-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"38.129.56.193:5001/podified-epoxy-centos9/openstack-watcher-api:watcher_latest\\\"\"" pod="openstack/watcher-db-sync-vhshs" podUID="cd3bf27b-46bc-468e-8735-3a3ed6eda272" Jan 30 12:18:40 crc kubenswrapper[4703]: I0130 12:18:40.750645 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-75krj" podStartSLOduration=2.838755437 podStartE2EDuration="27.750615414s" podCreationTimestamp="2026-01-30 12:18:13 +0000 UTC" firstStartedPulling="2026-01-30 12:18:15.235629978 +0000 UTC m=+1331.013451632" lastFinishedPulling="2026-01-30 12:18:40.147489955 +0000 UTC m=+1355.925311609" observedRunningTime="2026-01-30 12:18:40.736100253 +0000 UTC m=+1356.513921907" watchObservedRunningTime="2026-01-30 12:18:40.750615414 +0000 UTC m=+1356.528437068" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.224754 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-6rwpn"] Jan 30 12:18:41 crc kubenswrapper[4703]: E0130 12:18:41.225977 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4a552c6-d8b7-470e-aaae-01d7d29d9cb5" containerName="mariadb-account-create-update" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.226003 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4a552c6-d8b7-470e-aaae-01d7d29d9cb5" containerName="mariadb-account-create-update" Jan 30 12:18:41 crc kubenswrapper[4703]: E0130 12:18:41.226029 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3da2d370-06f5-4fcc-b58e-2676657e6e85" containerName="glance-db-sync" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.226037 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="3da2d370-06f5-4fcc-b58e-2676657e6e85" containerName="glance-db-sync" Jan 30 12:18:41 crc kubenswrapper[4703]: E0130 12:18:41.226046 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23b1be5c-b917-4633-b207-9dfeb79ebadc" containerName="mariadb-database-create" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.226054 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="23b1be5c-b917-4633-b207-9dfeb79ebadc" containerName="mariadb-database-create" Jan 30 12:18:41 crc kubenswrapper[4703]: E0130 12:18:41.226069 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f0bf583-356b-4550-9257-ec624b1e5de1" containerName="init" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.226076 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f0bf583-356b-4550-9257-ec624b1e5de1" containerName="init" Jan 30 12:18:41 crc kubenswrapper[4703]: E0130 12:18:41.226089 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f7deb02-fe54-403e-b67a-45e8d8e62a62" containerName="mariadb-account-create-update" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.226097 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f7deb02-fe54-403e-b67a-45e8d8e62a62" 
containerName="mariadb-account-create-update" Jan 30 12:18:41 crc kubenswrapper[4703]: E0130 12:18:41.226131 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe0648b2-c4f9-47dd-bf18-e00e2958c243" containerName="mariadb-database-create" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.226140 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe0648b2-c4f9-47dd-bf18-e00e2958c243" containerName="mariadb-database-create" Jan 30 12:18:41 crc kubenswrapper[4703]: E0130 12:18:41.226155 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f0bf583-356b-4550-9257-ec624b1e5de1" containerName="dnsmasq-dns" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.226162 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f0bf583-356b-4550-9257-ec624b1e5de1" containerName="dnsmasq-dns" Jan 30 12:18:41 crc kubenswrapper[4703]: E0130 12:18:41.226172 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24145d6b-2acd-413a-8305-a295030ebe1f" containerName="mariadb-account-create-update" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.226181 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="24145d6b-2acd-413a-8305-a295030ebe1f" containerName="mariadb-account-create-update" Jan 30 12:18:41 crc kubenswrapper[4703]: E0130 12:18:41.226194 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d79ec4e-4ab2-4be5-8682-ad1d8aeea327" containerName="mariadb-database-create" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.226201 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d79ec4e-4ab2-4be5-8682-ad1d8aeea327" containerName="mariadb-database-create" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.226460 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f0bf583-356b-4550-9257-ec624b1e5de1" containerName="dnsmasq-dns" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.226507 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="24145d6b-2acd-413a-8305-a295030ebe1f" containerName="mariadb-account-create-update" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.226527 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4a552c6-d8b7-470e-aaae-01d7d29d9cb5" containerName="mariadb-account-create-update" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.226536 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe0648b2-c4f9-47dd-bf18-e00e2958c243" containerName="mariadb-database-create" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.226558 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="23b1be5c-b917-4633-b207-9dfeb79ebadc" containerName="mariadb-database-create" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.226570 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="3da2d370-06f5-4fcc-b58e-2676657e6e85" containerName="glance-db-sync" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.226584 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f7deb02-fe54-403e-b67a-45e8d8e62a62" containerName="mariadb-account-create-update" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.226602 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d79ec4e-4ab2-4be5-8682-ad1d8aeea327" containerName="mariadb-database-create" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.227956 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.244453 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-6rwpn"] Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.249321 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-dns-swift-storage-0\") pod \"dnsmasq-dns-74f6bcbc87-6rwpn\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.249412 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dpl2\" (UniqueName: \"kubernetes.io/projected/b2a66906-bc0b-419a-9304-6f623d26d9d4-kube-api-access-7dpl2\") pod \"dnsmasq-dns-74f6bcbc87-6rwpn\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.249476 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-config\") pod \"dnsmasq-dns-74f6bcbc87-6rwpn\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.249512 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-ovsdbserver-sb\") pod \"dnsmasq-dns-74f6bcbc87-6rwpn\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.249556 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-dns-svc\") pod \"dnsmasq-dns-74f6bcbc87-6rwpn\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.249598 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6bcbc87-6rwpn\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.352013 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-dns-swift-storage-0\") pod \"dnsmasq-dns-74f6bcbc87-6rwpn\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.352492 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dpl2\" (UniqueName: \"kubernetes.io/projected/b2a66906-bc0b-419a-9304-6f623d26d9d4-kube-api-access-7dpl2\") pod \"dnsmasq-dns-74f6bcbc87-6rwpn\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.352650 4703 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-config\") pod \"dnsmasq-dns-74f6bcbc87-6rwpn\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.352761 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-ovsdbserver-sb\") pod \"dnsmasq-dns-74f6bcbc87-6rwpn\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.352873 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-dns-svc\") pod \"dnsmasq-dns-74f6bcbc87-6rwpn\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.353002 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6bcbc87-6rwpn\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.353793 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-dns-swift-storage-0\") pod \"dnsmasq-dns-74f6bcbc87-6rwpn\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.354319 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-ovsdbserver-sb\") pod \"dnsmasq-dns-74f6bcbc87-6rwpn\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.356809 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-ovsdbserver-nb\") pod \"dnsmasq-dns-74f6bcbc87-6rwpn\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.357906 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-config\") pod \"dnsmasq-dns-74f6bcbc87-6rwpn\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.359909 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-dns-svc\") pod \"dnsmasq-dns-74f6bcbc87-6rwpn\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.378743 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dpl2\" (UniqueName: 
\"kubernetes.io/projected/b2a66906-bc0b-419a-9304-6f623d26d9d4-kube-api-access-7dpl2\") pod \"dnsmasq-dns-74f6bcbc87-6rwpn\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:41 crc kubenswrapper[4703]: I0130 12:18:41.575935 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:42 crc kubenswrapper[4703]: I0130 12:18:42.418501 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-6rwpn"] Jan 30 12:18:42 crc kubenswrapper[4703]: I0130 12:18:42.449432 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" event={"ID":"b2a66906-bc0b-419a-9304-6f623d26d9d4","Type":"ContainerStarted","Data":"7c63c1b3016826510d9bf25b56b6270cc9894b7abc94cbb343fa5e9f5bfe812a"} Jan 30 12:18:43 crc kubenswrapper[4703]: I0130 12:18:43.619868 4703 generic.go:334] "Generic (PLEG): container finished" podID="b2a66906-bc0b-419a-9304-6f623d26d9d4" containerID="91ac5467a73d0e8ae35204f202883e8aec17e7102bc78779a790a45040ae8bdf" exitCode=0 Jan 30 12:18:43 crc kubenswrapper[4703]: I0130 12:18:43.620326 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" event={"ID":"b2a66906-bc0b-419a-9304-6f623d26d9d4","Type":"ContainerDied","Data":"91ac5467a73d0e8ae35204f202883e8aec17e7102bc78779a790a45040ae8bdf"} Jan 30 12:18:44 crc kubenswrapper[4703]: I0130 12:18:44.636022 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" event={"ID":"b2a66906-bc0b-419a-9304-6f623d26d9d4","Type":"ContainerStarted","Data":"cbb31278c1f8397b1644ae1d0a8c7040513e4a4caed5da8a559c0866c5e5889c"} Jan 30 12:18:44 crc kubenswrapper[4703]: I0130 12:18:44.636943 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:44 crc kubenswrapper[4703]: I0130 12:18:44.640981 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7a919f37-730e-42a4-848f-ae5b2096b2d2","Type":"ContainerStarted","Data":"93898e666f9e06e253c6f5caa355ad757f959514951a03b165d07fe622904434"} Jan 30 12:18:44 crc kubenswrapper[4703]: I0130 12:18:44.641040 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7a919f37-730e-42a4-848f-ae5b2096b2d2","Type":"ContainerStarted","Data":"cbf2e3a5111beddb54852d3d864b33df39fe6e7ea9aa977e50c5c6a07a7b51b2"} Jan 30 12:18:44 crc kubenswrapper[4703]: I0130 12:18:44.667383 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" podStartSLOduration=3.667361301 podStartE2EDuration="3.667361301s" podCreationTimestamp="2026-01-30 12:18:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:18:44.65841049 +0000 UTC m=+1360.436232144" watchObservedRunningTime="2026-01-30 12:18:44.667361301 +0000 UTC m=+1360.445182955" Jan 30 12:18:44 crc kubenswrapper[4703]: I0130 12:18:44.694256 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=39.694234114 podStartE2EDuration="39.694234114s" podCreationTimestamp="2026-01-30 12:18:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-30 12:18:44.688967372 +0000 UTC m=+1360.466789026" watchObservedRunningTime="2026-01-30 12:18:44.694234114 +0000 UTC m=+1360.472055768" Jan 30 12:18:46 crc kubenswrapper[4703]: I0130 12:18:46.152413 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:46 crc kubenswrapper[4703]: I0130 12:18:46.665988 4703 generic.go:334] "Generic (PLEG): container finished" podID="5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76" containerID="ac6c7fe606c25265e0a7610c2c63533975819202f2cfd4cf4cd4188f28779a0f" exitCode=0 Jan 30 12:18:46 crc kubenswrapper[4703]: I0130 12:18:46.666096 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-75krj" event={"ID":"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76","Type":"ContainerDied","Data":"ac6c7fe606c25265e0a7610c2c63533975819202f2cfd4cf4cd4188f28779a0f"} Jan 30 12:18:48 crc kubenswrapper[4703]: I0130 12:18:48.080563 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-75krj" Jan 30 12:18:48 crc kubenswrapper[4703]: I0130 12:18:48.162395 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76-combined-ca-bundle\") pod \"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76\" (UID: \"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76\") " Jan 30 12:18:48 crc kubenswrapper[4703]: I0130 12:18:48.162521 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ws8dn\" (UniqueName: \"kubernetes.io/projected/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76-kube-api-access-ws8dn\") pod \"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76\" (UID: \"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76\") " Jan 30 12:18:48 crc kubenswrapper[4703]: I0130 12:18:48.162576 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76-config-data\") pod \"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76\" (UID: \"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76\") " Jan 30 12:18:48 crc kubenswrapper[4703]: I0130 12:18:48.171626 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76-kube-api-access-ws8dn" (OuterVolumeSpecName: "kube-api-access-ws8dn") pod "5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76" (UID: "5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76"). InnerVolumeSpecName "kube-api-access-ws8dn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:18:48 crc kubenswrapper[4703]: I0130 12:18:48.196798 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76" (UID: "5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:18:48 crc kubenswrapper[4703]: I0130 12:18:48.219576 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76-config-data" (OuterVolumeSpecName: "config-data") pod "5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76" (UID: "5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:18:48 crc kubenswrapper[4703]: I0130 12:18:48.265382 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:48 crc kubenswrapper[4703]: I0130 12:18:48.265441 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ws8dn\" (UniqueName: \"kubernetes.io/projected/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76-kube-api-access-ws8dn\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:48 crc kubenswrapper[4703]: I0130 12:18:48.265459 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:48 crc kubenswrapper[4703]: I0130 12:18:48.692041 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-75krj" event={"ID":"5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76","Type":"ContainerDied","Data":"353d471835e04cc2ae2a005a26409252def08cbbbfa8dcce13017f7cfc9d642e"} Jan 30 12:18:48 crc kubenswrapper[4703]: I0130 12:18:48.692474 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="353d471835e04cc2ae2a005a26409252def08cbbbfa8dcce13017f7cfc9d642e" Jan 30 12:18:48 crc kubenswrapper[4703]: I0130 12:18:48.692170 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-75krj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.063897 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-dswjd"] Jan 30 12:18:49 crc kubenswrapper[4703]: E0130 12:18:49.064672 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76" containerName="keystone-db-sync" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.064699 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76" containerName="keystone-db-sync" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.064939 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76" containerName="keystone-db-sync" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.066104 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.071304 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.071400 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.077054 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.077542 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-cz6x7" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.077756 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.079740 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-fernet-keys\") pod \"keystone-bootstrap-dswjd\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.079810 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-config-data\") pod \"keystone-bootstrap-dswjd\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.079833 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-scripts\") pod \"keystone-bootstrap-dswjd\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.079926 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-combined-ca-bundle\") pod \"keystone-bootstrap-dswjd\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.080010 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnmt8\" (UniqueName: \"kubernetes.io/projected/ba6892f1-1087-4650-b6fe-701fc48c94f7-kube-api-access-xnmt8\") pod \"keystone-bootstrap-dswjd\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.080484 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-credential-keys\") pod \"keystone-bootstrap-dswjd\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.085229 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-6rwpn"] Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.085673 4703 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" podUID="b2a66906-bc0b-419a-9304-6f623d26d9d4" containerName="dnsmasq-dns" containerID="cri-o://cbb31278c1f8397b1644ae1d0a8c7040513e4a4caed5da8a559c0866c5e5889c" gracePeriod=10 Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.118353 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-dswjd"] Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.118448 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.183986 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-combined-ca-bundle\") pod \"keystone-bootstrap-dswjd\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.184064 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnmt8\" (UniqueName: \"kubernetes.io/projected/ba6892f1-1087-4650-b6fe-701fc48c94f7-kube-api-access-xnmt8\") pod \"keystone-bootstrap-dswjd\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.184178 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-credential-keys\") pod \"keystone-bootstrap-dswjd\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.184240 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-fernet-keys\") pod \"keystone-bootstrap-dswjd\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.184280 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-config-data\") pod \"keystone-bootstrap-dswjd\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.184302 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-scripts\") pod \"keystone-bootstrap-dswjd\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.200669 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-k8mfj"] Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.204616 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.207240 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-config-data\") pod \"keystone-bootstrap-dswjd\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.212710 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-scripts\") pod \"keystone-bootstrap-dswjd\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.213413 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-fernet-keys\") pod \"keystone-bootstrap-dswjd\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.217189 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-combined-ca-bundle\") pod \"keystone-bootstrap-dswjd\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.248884 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnmt8\" (UniqueName: \"kubernetes.io/projected/ba6892f1-1087-4650-b6fe-701fc48c94f7-kube-api-access-xnmt8\") pod \"keystone-bootstrap-dswjd\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.285232 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrsxg\" (UniqueName: \"kubernetes.io/projected/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-kube-api-access-xrsxg\") pod \"dnsmasq-dns-847c4cc679-k8mfj\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.285302 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-dns-svc\") pod \"dnsmasq-dns-847c4cc679-k8mfj\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.285351 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-ovsdbserver-sb\") pod \"dnsmasq-dns-847c4cc679-k8mfj\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.285387 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-ovsdbserver-nb\") pod \"dnsmasq-dns-847c4cc679-k8mfj\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 
30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.285417 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-config\") pod \"dnsmasq-dns-847c4cc679-k8mfj\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.285448 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-dns-swift-storage-0\") pod \"dnsmasq-dns-847c4cc679-k8mfj\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.291320 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-credential-keys\") pod \"keystone-bootstrap-dswjd\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.297218 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-k8mfj"] Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.388771 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-config\") pod \"dnsmasq-dns-847c4cc679-k8mfj\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.388961 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-dns-swift-storage-0\") pod \"dnsmasq-dns-847c4cc679-k8mfj\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.389089 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrsxg\" (UniqueName: \"kubernetes.io/projected/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-kube-api-access-xrsxg\") pod \"dnsmasq-dns-847c4cc679-k8mfj\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.389197 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-dns-svc\") pod \"dnsmasq-dns-847c4cc679-k8mfj\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.389308 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-ovsdbserver-sb\") pod \"dnsmasq-dns-847c4cc679-k8mfj\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.389448 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-ovsdbserver-nb\") pod 
\"dnsmasq-dns-847c4cc679-k8mfj\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.407968 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.408072 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-dns-swift-storage-0\") pod \"dnsmasq-dns-847c4cc679-k8mfj\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.397593 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-ovsdbserver-nb\") pod \"dnsmasq-dns-847c4cc679-k8mfj\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.415726 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-scrgs"] Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.417513 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.419295 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-config\") pod \"dnsmasq-dns-847c4cc679-k8mfj\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.419518 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-ovsdbserver-sb\") pod \"dnsmasq-dns-847c4cc679-k8mfj\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.425194 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-dns-svc\") pod \"dnsmasq-dns-847c4cc679-k8mfj\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.439661 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.440150 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.743649 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-8z4wq" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.757542 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrsxg\" (UniqueName: \"kubernetes.io/projected/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-kube-api-access-xrsxg\") pod \"dnsmasq-dns-847c4cc679-k8mfj\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.762748 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/cinder-db-sync-scrgs"] Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.854546 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1665113c-fcaa-4a13-9de2-552579864e44-etc-machine-id\") pod \"cinder-db-sync-scrgs\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") " pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.854656 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-config-data\") pod \"cinder-db-sync-scrgs\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") " pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.854685 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-combined-ca-bundle\") pod \"cinder-db-sync-scrgs\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") " pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.854740 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xclk\" (UniqueName: \"kubernetes.io/projected/1665113c-fcaa-4a13-9de2-552579864e44-kube-api-access-4xclk\") pod \"cinder-db-sync-scrgs\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") " pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.854768 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-db-sync-config-data\") pod \"cinder-db-sync-scrgs\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") " pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.854813 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-scripts\") pod \"cinder-db-sync-scrgs\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") " pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.957583 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-combined-ca-bundle\") pod \"cinder-db-sync-scrgs\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") " pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.957708 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xclk\" (UniqueName: \"kubernetes.io/projected/1665113c-fcaa-4a13-9de2-552579864e44-kube-api-access-4xclk\") pod \"cinder-db-sync-scrgs\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") " pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.957746 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-db-sync-config-data\") pod \"cinder-db-sync-scrgs\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") " pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:49 
crc kubenswrapper[4703]: I0130 12:18:49.957788 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-scripts\") pod \"cinder-db-sync-scrgs\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") " pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.957847 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1665113c-fcaa-4a13-9de2-552579864e44-etc-machine-id\") pod \"cinder-db-sync-scrgs\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") " pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.958312 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1665113c-fcaa-4a13-9de2-552579864e44-etc-machine-id\") pod \"cinder-db-sync-scrgs\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") " pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.959779 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-config-data\") pod \"cinder-db-sync-scrgs\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") " pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.971627 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-db-sync-config-data\") pod \"cinder-db-sync-scrgs\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") " pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.984141 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-combined-ca-bundle\") pod \"cinder-db-sync-scrgs\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") " pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:49 crc kubenswrapper[4703]: I0130 12:18:49.984584 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-scripts\") pod \"cinder-db-sync-scrgs\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") " pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.013244 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-594746f8cc-b7spb"] Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.013841 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-config-data\") pod \"cinder-db-sync-scrgs\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") " pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.015616 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.021478 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xclk\" (UniqueName: \"kubernetes.io/projected/1665113c-fcaa-4a13-9de2-552579864e44-kube-api-access-4xclk\") pod \"cinder-db-sync-scrgs\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") " pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.040775 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.040995 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.041176 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.041349 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-jk6wb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.043401 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-95pfb"] Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.045278 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-95pfb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.063483 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.100470 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.110729 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.113670 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-drksr" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.142104 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-scrgs" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.204531 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6ea69f39-6623-4a6e-adff-6014bb7c749b-config-data\") pod \"horizon-594746f8cc-b7spb\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.204621 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6ea69f39-6623-4a6e-adff-6014bb7c749b-horizon-secret-key\") pod \"horizon-594746f8cc-b7spb\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.204652 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6ea69f39-6623-4a6e-adff-6014bb7c749b-scripts\") pod \"horizon-594746f8cc-b7spb\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.204708 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcq9s\" (UniqueName: \"kubernetes.io/projected/bd8fdb9b-147d-4634-a312-719f1c62c7ff-kube-api-access-rcq9s\") pod \"neutron-db-sync-95pfb\" (UID: \"bd8fdb9b-147d-4634-a312-719f1c62c7ff\") " pod="openstack/neutron-db-sync-95pfb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.204776 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd8fdb9b-147d-4634-a312-719f1c62c7ff-combined-ca-bundle\") pod \"neutron-db-sync-95pfb\" (UID: \"bd8fdb9b-147d-4634-a312-719f1c62c7ff\") " pod="openstack/neutron-db-sync-95pfb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.204817 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bd8fdb9b-147d-4634-a312-719f1c62c7ff-config\") pod \"neutron-db-sync-95pfb\" (UID: \"bd8fdb9b-147d-4634-a312-719f1c62c7ff\") " pod="openstack/neutron-db-sync-95pfb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.204836 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ea69f39-6623-4a6e-adff-6014bb7c749b-logs\") pod \"horizon-594746f8cc-b7spb\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.204873 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvbbp\" (UniqueName: \"kubernetes.io/projected/6ea69f39-6623-4a6e-adff-6014bb7c749b-kube-api-access-dvbbp\") pod \"horizon-594746f8cc-b7spb\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.249198 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-594746f8cc-b7spb"] Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.329316 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/bd8fdb9b-147d-4634-a312-719f1c62c7ff-combined-ca-bundle\") pod \"neutron-db-sync-95pfb\" (UID: \"bd8fdb9b-147d-4634-a312-719f1c62c7ff\") " pod="openstack/neutron-db-sync-95pfb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.329390 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bd8fdb9b-147d-4634-a312-719f1c62c7ff-config\") pod \"neutron-db-sync-95pfb\" (UID: \"bd8fdb9b-147d-4634-a312-719f1c62c7ff\") " pod="openstack/neutron-db-sync-95pfb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.329412 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ea69f39-6623-4a6e-adff-6014bb7c749b-logs\") pod \"horizon-594746f8cc-b7spb\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.329446 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvbbp\" (UniqueName: \"kubernetes.io/projected/6ea69f39-6623-4a6e-adff-6014bb7c749b-kube-api-access-dvbbp\") pod \"horizon-594746f8cc-b7spb\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.329496 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6ea69f39-6623-4a6e-adff-6014bb7c749b-config-data\") pod \"horizon-594746f8cc-b7spb\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.329520 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6ea69f39-6623-4a6e-adff-6014bb7c749b-horizon-secret-key\") pod \"horizon-594746f8cc-b7spb\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.329555 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6ea69f39-6623-4a6e-adff-6014bb7c749b-scripts\") pod \"horizon-594746f8cc-b7spb\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.329594 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcq9s\" (UniqueName: \"kubernetes.io/projected/bd8fdb9b-147d-4634-a312-719f1c62c7ff-kube-api-access-rcq9s\") pod \"neutron-db-sync-95pfb\" (UID: \"bd8fdb9b-147d-4634-a312-719f1c62c7ff\") " pod="openstack/neutron-db-sync-95pfb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.340804 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6ea69f39-6623-4a6e-adff-6014bb7c749b-config-data\") pod \"horizon-594746f8cc-b7spb\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.367176 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6ea69f39-6623-4a6e-adff-6014bb7c749b-scripts\") pod \"horizon-594746f8cc-b7spb\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " 
pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.370685 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ea69f39-6623-4a6e-adff-6014bb7c749b-logs\") pod \"horizon-594746f8cc-b7spb\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.371017 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd8fdb9b-147d-4634-a312-719f1c62c7ff-combined-ca-bundle\") pod \"neutron-db-sync-95pfb\" (UID: \"bd8fdb9b-147d-4634-a312-719f1c62c7ff\") " pod="openstack/neutron-db-sync-95pfb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.380010 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-95pfb"] Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.406994 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/bd8fdb9b-147d-4634-a312-719f1c62c7ff-config\") pod \"neutron-db-sync-95pfb\" (UID: \"bd8fdb9b-147d-4634-a312-719f1c62c7ff\") " pod="openstack/neutron-db-sync-95pfb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.448703 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6ea69f39-6623-4a6e-adff-6014bb7c749b-horizon-secret-key\") pod \"horizon-594746f8cc-b7spb\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.465034 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvbbp\" (UniqueName: \"kubernetes.io/projected/6ea69f39-6623-4a6e-adff-6014bb7c749b-kube-api-access-dvbbp\") pod \"horizon-594746f8cc-b7spb\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.471652 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcq9s\" (UniqueName: \"kubernetes.io/projected/bd8fdb9b-147d-4634-a312-719f1c62c7ff-kube-api-access-rcq9s\") pod \"neutron-db-sync-95pfb\" (UID: \"bd8fdb9b-147d-4634-a312-719f1c62c7ff\") " pod="openstack/neutron-db-sync-95pfb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.520760 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-k8mfj"] Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.540191 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-mmqwb"] Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.542099 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-mmqwb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.562996 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.581412 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.568201 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.591074 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-jwlw6" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.591260 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.607048 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.624619 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.624966 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.625253 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-5gn24" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.634670 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-mmqwb"] Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.646626 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/273a2195-27a6-4229-b3c0-8c67d9fc4302-logs\") pod \"placement-db-sync-mmqwb\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " pod="openstack/placement-db-sync-mmqwb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.646686 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/273a2195-27a6-4229-b3c0-8c67d9fc4302-combined-ca-bundle\") pod \"placement-db-sync-mmqwb\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " pod="openstack/placement-db-sync-mmqwb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.646718 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/273a2195-27a6-4229-b3c0-8c67d9fc4302-config-data\") pod \"placement-db-sync-mmqwb\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " pod="openstack/placement-db-sync-mmqwb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.646813 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7b9n9\" (UniqueName: \"kubernetes.io/projected/273a2195-27a6-4229-b3c0-8c67d9fc4302-kube-api-access-7b9n9\") pod \"placement-db-sync-mmqwb\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " pod="openstack/placement-db-sync-mmqwb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.646855 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/273a2195-27a6-4229-b3c0-8c67d9fc4302-scripts\") pod \"placement-db-sync-mmqwb\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " pod="openstack/placement-db-sync-mmqwb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.687356 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-mdl7r"] Jan 
30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.689426 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.697817 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.719432 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-95pfb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.730226 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.756323 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/273a2195-27a6-4229-b3c0-8c67d9fc4302-logs\") pod \"placement-db-sync-mmqwb\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " pod="openstack/placement-db-sync-mmqwb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.756411 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/273a2195-27a6-4229-b3c0-8c67d9fc4302-combined-ca-bundle\") pod \"placement-db-sync-mmqwb\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " pod="openstack/placement-db-sync-mmqwb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.756473 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/273a2195-27a6-4229-b3c0-8c67d9fc4302-config-data\") pod \"placement-db-sync-mmqwb\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " pod="openstack/placement-db-sync-mmqwb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.756514 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-mdl7r\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.756568 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.756665 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2331b62f-b8c2-4a35-b7d9-debf6073d98d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.756710 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.756753 4703 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-mdl7r\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.756789 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-scripts\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.756833 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-mdl7r\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.756921 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-mdl7r\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.756951 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-config\") pod \"dnsmasq-dns-785d8bcb8c-mdl7r\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.756984 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7b9n9\" (UniqueName: \"kubernetes.io/projected/273a2195-27a6-4229-b3c0-8c67d9fc4302-kube-api-access-7b9n9\") pod \"placement-db-sync-mmqwb\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " pod="openstack/placement-db-sync-mmqwb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.757017 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.757072 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/273a2195-27a6-4229-b3c0-8c67d9fc4302-scripts\") pod \"placement-db-sync-mmqwb\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " pod="openstack/placement-db-sync-mmqwb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.757108 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5j2q\" (UniqueName: \"kubernetes.io/projected/2331b62f-b8c2-4a35-b7d9-debf6073d98d-kube-api-access-g5j2q\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.757148 4703 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hd8n5\" (UniqueName: \"kubernetes.io/projected/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-kube-api-access-hd8n5\") pod \"dnsmasq-dns-785d8bcb8c-mdl7r\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.757203 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-config-data\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.757257 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2331b62f-b8c2-4a35-b7d9-debf6073d98d-logs\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.757917 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/273a2195-27a6-4229-b3c0-8c67d9fc4302-logs\") pod \"placement-db-sync-mmqwb\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " pod="openstack/placement-db-sync-mmqwb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.768204 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-vkzk9"] Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.769961 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-vkzk9" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.778383 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-rtx45" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.778592 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.786311 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/273a2195-27a6-4229-b3c0-8c67d9fc4302-config-data\") pod \"placement-db-sync-mmqwb\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " pod="openstack/placement-db-sync-mmqwb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.786781 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/273a2195-27a6-4229-b3c0-8c67d9fc4302-combined-ca-bundle\") pod \"placement-db-sync-mmqwb\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " pod="openstack/placement-db-sync-mmqwb" Jan 30 12:18:50 crc kubenswrapper[4703]: I0130 12:18:50.788902 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/273a2195-27a6-4229-b3c0-8c67d9fc4302-scripts\") pod \"placement-db-sync-mmqwb\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " pod="openstack/placement-db-sync-mmqwb" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:50.848291 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7b9n9\" (UniqueName: \"kubernetes.io/projected/273a2195-27a6-4229-b3c0-8c67d9fc4302-kube-api-access-7b9n9\") pod 
\"placement-db-sync-mmqwb\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " pod="openstack/placement-db-sync-mmqwb" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:50.873982 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-mdl7r\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.462912 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.463051 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2331b62f-b8c2-4a35-b7d9-debf6073d98d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.463083 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.463139 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-mdl7r\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.463164 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-scripts\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.463236 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-mdl7r\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.463346 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-mdl7r\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.463374 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-config\") pod \"dnsmasq-dns-785d8bcb8c-mdl7r\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " 
pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.463414 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.463496 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5j2q\" (UniqueName: \"kubernetes.io/projected/2331b62f-b8c2-4a35-b7d9-debf6073d98d-kube-api-access-g5j2q\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.463522 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hd8n5\" (UniqueName: \"kubernetes.io/projected/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-kube-api-access-hd8n5\") pod \"dnsmasq-dns-785d8bcb8c-mdl7r\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.463575 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-config-data\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.463627 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2331b62f-b8c2-4a35-b7d9-debf6073d98d-logs\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.470751 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-external-api-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.437005 4703 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podf4a552c6-d8b7-470e-aaae-01d7d29d9cb5"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podf4a552c6-d8b7-470e-aaae-01d7d29d9cb5] : Timed out while waiting for systemd to remove kubepods-besteffort-podf4a552c6_d8b7_470e_aaae_01d7d29d9cb5.slice" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.343965 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.474291 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.474315 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-mdl7r"] Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.522766 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-dns-swift-storage-0\") pod \"dnsmasq-dns-785d8bcb8c-mdl7r\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.523621 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2331b62f-b8c2-4a35-b7d9-debf6073d98d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.524965 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-scripts\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.526308 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-dns-svc\") pod \"dnsmasq-dns-785d8bcb8c-mdl7r\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.527223 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-ovsdbserver-nb\") pod \"dnsmasq-dns-785d8bcb8c-mdl7r\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.540431 4703 generic.go:334] "Generic (PLEG): container finished" podID="b2a66906-bc0b-419a-9304-6f623d26d9d4" containerID="cbb31278c1f8397b1644ae1d0a8c7040513e4a4caed5da8a559c0866c5e5889c" exitCode=0 Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.540766 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-mmqwb" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.541367 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.544281 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2331b62f-b8c2-4a35-b7d9-debf6073d98d-logs\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.557440 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-ovsdbserver-sb\") pod \"dnsmasq-dns-785d8bcb8c-mdl7r\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.566892 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70-combined-ca-bundle\") pod \"barbican-db-sync-vkzk9\" (UID: \"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70\") " pod="openstack/barbican-db-sync-vkzk9" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.567035 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70-db-sync-config-data\") pod \"barbican-db-sync-vkzk9\" (UID: \"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70\") " pod="openstack/barbican-db-sync-vkzk9" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.572733 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.581889 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-config\") pod \"dnsmasq-dns-785d8bcb8c-mdl7r\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.588326 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmlb5\" (UniqueName: \"kubernetes.io/projected/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70-kube-api-access-xmlb5\") pod \"barbican-db-sync-vkzk9\" (UID: \"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70\") " pod="openstack/barbican-db-sync-vkzk9" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.624469 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-config-data\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:51 crc 
kubenswrapper[4703]: I0130 12:18:51.625518 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hd8n5\" (UniqueName: \"kubernetes.io/projected/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-kube-api-access-hd8n5\") pod \"dnsmasq-dns-785d8bcb8c-mdl7r\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.626325 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5j2q\" (UniqueName: \"kubernetes.io/projected/2331b62f-b8c2-4a35-b7d9-debf6073d98d-kube-api-access-g5j2q\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.639500 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-vkzk9"] Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.639598 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.639615 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" event={"ID":"b2a66906-bc0b-419a-9304-6f623d26d9d4","Type":"ContainerDied","Data":"cbb31278c1f8397b1644ae1d0a8c7040513e4a4caed5da8a559c0866c5e5889c"} Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.639644 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.652897 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.655569 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.660720 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.661071 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.689913 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.691980 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.692047 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gzxc\" (UniqueName: \"kubernetes.io/projected/572a81eb-68df-470a-9ca8-1febfc6167ad-kube-api-access-9gzxc\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.692093 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70-combined-ca-bundle\") pod \"barbican-db-sync-vkzk9\" (UID: \"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70\") " pod="openstack/barbican-db-sync-vkzk9" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.692142 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70-db-sync-config-data\") pod \"barbican-db-sync-vkzk9\" (UID: \"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70\") " pod="openstack/barbican-db-sync-vkzk9" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.692166 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.692195 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmlb5\" (UniqueName: \"kubernetes.io/projected/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70-kube-api-access-xmlb5\") pod \"barbican-db-sync-vkzk9\" (UID: \"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70\") " pod="openstack/barbican-db-sync-vkzk9" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.692246 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/572a81eb-68df-470a-9ca8-1febfc6167ad-run-httpd\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.692280 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-scripts\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " 
pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.692538 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-config-data\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.692635 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/572a81eb-68df-470a-9ca8-1febfc6167ad-log-httpd\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.720629 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " pod="openstack/glance-default-external-api-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.726306 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70-combined-ca-bundle\") pod \"barbican-db-sync-vkzk9\" (UID: \"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70\") " pod="openstack/barbican-db-sync-vkzk9" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.731660 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70-db-sync-config-data\") pod \"barbican-db-sync-vkzk9\" (UID: \"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70\") " pod="openstack/barbican-db-sync-vkzk9" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.732491 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5bbf599fb5-drlk7"] Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.753825 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmlb5\" (UniqueName: \"kubernetes.io/projected/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70-kube-api-access-xmlb5\") pod \"barbican-db-sync-vkzk9\" (UID: \"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70\") " pod="openstack/barbican-db-sync-vkzk9" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.756744 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.815204 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.819475 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/572a81eb-68df-470a-9ca8-1febfc6167ad-run-httpd\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.819597 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-scripts\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.819779 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-config-data\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.819876 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/572a81eb-68df-470a-9ca8-1febfc6167ad-log-httpd\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.820042 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.820113 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gzxc\" (UniqueName: \"kubernetes.io/projected/572a81eb-68df-470a-9ca8-1febfc6167ad-kube-api-access-9gzxc\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.825008 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/572a81eb-68df-470a-9ca8-1febfc6167ad-log-httpd\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.832904 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/572a81eb-68df-470a-9ca8-1febfc6167ad-run-httpd\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.846632 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-config-data\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " 
pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.847430 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.858001 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.859569 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-scripts\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.877899 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.879563 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.881005 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.884392 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2a66906-bc0b-419a-9304-6f623d26d9d4" containerName="dnsmasq-dns" Jan 30 12:18:51 crc kubenswrapper[4703]: E0130 12:18:51.887538 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2a66906-bc0b-419a-9304-6f623d26d9d4" containerName="dnsmasq-dns" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.887737 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2a66906-bc0b-419a-9304-6f623d26d9d4" containerName="dnsmasq-dns" Jan 30 12:18:51 crc kubenswrapper[4703]: E0130 12:18:51.896761 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2a66906-bc0b-419a-9304-6f623d26d9d4" containerName="init" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.896811 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2a66906-bc0b-419a-9304-6f623d26d9d4" containerName="init" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.910396 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gzxc\" (UniqueName: \"kubernetes.io/projected/572a81eb-68df-470a-9ca8-1febfc6167ad-kube-api-access-9gzxc\") pod \"ceilometer-0\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " pod="openstack/ceilometer-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.927310 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2ac378ee-c96a-41cb-881b-0100056b27ad-horizon-secret-key\") pod \"horizon-5bbf599fb5-drlk7\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.927449 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/2ac378ee-c96a-41cb-881b-0100056b27ad-config-data\") pod \"horizon-5bbf599fb5-drlk7\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.927647 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ac378ee-c96a-41cb-881b-0100056b27ad-logs\") pod \"horizon-5bbf599fb5-drlk7\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.927795 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2ac378ee-c96a-41cb-881b-0100056b27ad-scripts\") pod \"horizon-5bbf599fb5-drlk7\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.927943 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkld4\" (UniqueName: \"kubernetes.io/projected/2ac378ee-c96a-41cb-881b-0100056b27ad-kube-api-access-gkld4\") pod \"horizon-5bbf599fb5-drlk7\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.928074 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.953626 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5bbf599fb5-drlk7"] Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.955450 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 30 12:18:51 crc kubenswrapper[4703]: I0130 12:18:51.956108 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.000524 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-vkzk9" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.031465 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-dns-swift-storage-0\") pod \"b2a66906-bc0b-419a-9304-6f623d26d9d4\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.031542 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-dns-svc\") pod \"b2a66906-bc0b-419a-9304-6f623d26d9d4\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.031613 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-ovsdbserver-nb\") pod \"b2a66906-bc0b-419a-9304-6f623d26d9d4\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.031681 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-ovsdbserver-sb\") pod \"b2a66906-bc0b-419a-9304-6f623d26d9d4\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.031761 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7dpl2\" (UniqueName: \"kubernetes.io/projected/b2a66906-bc0b-419a-9304-6f623d26d9d4-kube-api-access-7dpl2\") pod \"b2a66906-bc0b-419a-9304-6f623d26d9d4\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.031795 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-config\") pod \"b2a66906-bc0b-419a-9304-6f623d26d9d4\" (UID: \"b2a66906-bc0b-419a-9304-6f623d26d9d4\") " Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.033294 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.033366 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.033415 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ac378ee-c96a-41cb-881b-0100056b27ad-logs\") pod \"horizon-5bbf599fb5-drlk7\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.033841 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/7b2c7636-4ea7-4d61-9219-873ab28aa505-logs\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.033871 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7b2c7636-4ea7-4d61-9219-873ab28aa505-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.033891 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2ac378ee-c96a-41cb-881b-0100056b27ad-scripts\") pod \"horizon-5bbf599fb5-drlk7\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.033973 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-config-data\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.034050 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkld4\" (UniqueName: \"kubernetes.io/projected/2ac378ee-c96a-41cb-881b-0100056b27ad-kube-api-access-gkld4\") pod \"horizon-5bbf599fb5-drlk7\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.034190 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x94qw\" (UniqueName: \"kubernetes.io/projected/7b2c7636-4ea7-4d61-9219-873ab28aa505-kube-api-access-x94qw\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.034252 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.034269 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2ac378ee-c96a-41cb-881b-0100056b27ad-horizon-secret-key\") pod \"horizon-5bbf599fb5-drlk7\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.034315 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-scripts\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.034337 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/2ac378ee-c96a-41cb-881b-0100056b27ad-config-data\") pod \"horizon-5bbf599fb5-drlk7\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.035903 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2ac378ee-c96a-41cb-881b-0100056b27ad-config-data\") pod \"horizon-5bbf599fb5-drlk7\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.041820 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ac378ee-c96a-41cb-881b-0100056b27ad-logs\") pod \"horizon-5bbf599fb5-drlk7\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.042854 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.054433 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2ac378ee-c96a-41cb-881b-0100056b27ad-scripts\") pod \"horizon-5bbf599fb5-drlk7\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.062911 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2a66906-bc0b-419a-9304-6f623d26d9d4-kube-api-access-7dpl2" (OuterVolumeSpecName: "kube-api-access-7dpl2") pod "b2a66906-bc0b-419a-9304-6f623d26d9d4" (UID: "b2a66906-bc0b-419a-9304-6f623d26d9d4"). InnerVolumeSpecName "kube-api-access-7dpl2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.109451 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-dswjd"] Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.117444 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2ac378ee-c96a-41cb-881b-0100056b27ad-horizon-secret-key\") pod \"horizon-5bbf599fb5-drlk7\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.130886 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkld4\" (UniqueName: \"kubernetes.io/projected/2ac378ee-c96a-41cb-881b-0100056b27ad-kube-api-access-gkld4\") pod \"horizon-5bbf599fb5-drlk7\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.138650 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.138740 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-scripts\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.138814 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.138846 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.138888 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b2c7636-4ea7-4d61-9219-873ab28aa505-logs\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.138910 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7b2c7636-4ea7-4d61-9219-873ab28aa505-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.138948 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-config-data\") pod \"glance-default-internal-api-0\" 
(UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.139017 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x94qw\" (UniqueName: \"kubernetes.io/projected/7b2c7636-4ea7-4d61-9219-873ab28aa505-kube-api-access-x94qw\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.139137 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7dpl2\" (UniqueName: \"kubernetes.io/projected/b2a66906-bc0b-419a-9304-6f623d26d9d4-kube-api-access-7dpl2\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.139825 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.161337 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b2c7636-4ea7-4d61-9219-873ab28aa505-logs\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.161684 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7b2c7636-4ea7-4d61-9219-873ab28aa505-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.165989 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.177812 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-scripts\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.178893 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-config-data\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.185943 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.186725 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.189281 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x94qw\" (UniqueName: \"kubernetes.io/projected/7b2c7636-4ea7-4d61-9219-873ab28aa505-kube-api-access-x94qw\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.250689 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.251643 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.283287 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.334514 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b2a66906-bc0b-419a-9304-6f623d26d9d4" (UID: "b2a66906-bc0b-419a-9304-6f623d26d9d4"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.347896 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.535133 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b2a66906-bc0b-419a-9304-6f623d26d9d4" (UID: "b2a66906-bc0b-419a-9304-6f623d26d9d4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.537169 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-config" (OuterVolumeSpecName: "config") pod "b2a66906-bc0b-419a-9304-6f623d26d9d4" (UID: "b2a66906-bc0b-419a-9304-6f623d26d9d4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.537685 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b2a66906-bc0b-419a-9304-6f623d26d9d4" (UID: "b2a66906-bc0b-419a-9304-6f623d26d9d4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.538746 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b2a66906-bc0b-419a-9304-6f623d26d9d4" (UID: "b2a66906-bc0b-419a-9304-6f623d26d9d4"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.553558 4703 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.553616 4703 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.553636 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.553648 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b2a66906-bc0b-419a-9304-6f623d26d9d4-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.595839 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dswjd" event={"ID":"ba6892f1-1087-4650-b6fe-701fc48c94f7","Type":"ContainerStarted","Data":"cb799189d015b16b673cc317f47ab87a35228a617ae9afcd7dffdc4e91cafabe"} Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.611877 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-k8mfj"] Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.612427 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.612482 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" event={"ID":"b2a66906-bc0b-419a-9304-6f623d26d9d4","Type":"ContainerDied","Data":"7c63c1b3016826510d9bf25b56b6270cc9894b7abc94cbb343fa5e9f5bfe812a"} Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.612543 4703 scope.go:117] "RemoveContainer" containerID="cbb31278c1f8397b1644ae1d0a8c7040513e4a4caed5da8a559c0866c5e5889c" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.627717 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-scrgs"] Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.794526 4703 scope.go:117] "RemoveContainer" containerID="91ac5467a73d0e8ae35204f202883e8aec17e7102bc78779a790a45040ae8bdf" Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.797056 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-mmqwb"] Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.857606 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-6rwpn"] Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.879825 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-74f6bcbc87-6rwpn"] Jan 30 12:18:52 crc kubenswrapper[4703]: W0130 12:18:52.889288 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6ea69f39_6623_4a6e_adff_6014bb7c749b.slice/crio-a890a73d141d07cd66317fd9f8a89cd5039e4040f5a063824abb086ee524cafc WatchSource:0}: Error finding container a890a73d141d07cd66317fd9f8a89cd5039e4040f5a063824abb086ee524cafc: Status 404 returned error can't find the container 
with id a890a73d141d07cd66317fd9f8a89cd5039e4040f5a063824abb086ee524cafc Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.892690 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-594746f8cc-b7spb"] Jan 30 12:18:52 crc kubenswrapper[4703]: W0130 12:18:52.893850 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod638f2b9b_25b1_40b5_bb6a_78a9e5e8fa84.slice/crio-237f7167f7136778eeb1feafe9ae884bdf8f50a35f08763339b9784e19e600ce WatchSource:0}: Error finding container 237f7167f7136778eeb1feafe9ae884bdf8f50a35f08763339b9784e19e600ce: Status 404 returned error can't find the container with id 237f7167f7136778eeb1feafe9ae884bdf8f50a35f08763339b9784e19e600ce Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.904070 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-mdl7r"] Jan 30 12:18:52 crc kubenswrapper[4703]: I0130 12:18:52.984717 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-95pfb"] Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.166585 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2a66906-bc0b-419a-9304-6f623d26d9d4" path="/var/lib/kubelet/pods/b2a66906-bc0b-419a-9304-6f623d26d9d4/volumes" Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.167571 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.221338 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-594746f8cc-b7spb"] Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.380541 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-85698bccdf-jvv85"] Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.382809 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.420067 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f15d17d2-e8d6-49c8-a313-d815865271f0-config-data\") pod \"horizon-85698bccdf-jvv85\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") " pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.420704 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdltv\" (UniqueName: \"kubernetes.io/projected/f15d17d2-e8d6-49c8-a313-d815865271f0-kube-api-access-wdltv\") pod \"horizon-85698bccdf-jvv85\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") " pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.420744 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f15d17d2-e8d6-49c8-a313-d815865271f0-logs\") pod \"horizon-85698bccdf-jvv85\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") " pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.420845 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f15d17d2-e8d6-49c8-a313-d815865271f0-horizon-secret-key\") pod \"horizon-85698bccdf-jvv85\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") " pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.420899 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f15d17d2-e8d6-49c8-a313-d815865271f0-scripts\") pod \"horizon-85698bccdf-jvv85\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") " pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.439235 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-85698bccdf-jvv85"] Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.475534 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.513216 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.522777 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f15d17d2-e8d6-49c8-a313-d815865271f0-horizon-secret-key\") pod \"horizon-85698bccdf-jvv85\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") " pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.522881 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f15d17d2-e8d6-49c8-a313-d815865271f0-scripts\") pod \"horizon-85698bccdf-jvv85\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") " pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.522948 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f15d17d2-e8d6-49c8-a313-d815865271f0-config-data\") pod \"horizon-85698bccdf-jvv85\" 
(UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") " pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.523029 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdltv\" (UniqueName: \"kubernetes.io/projected/f15d17d2-e8d6-49c8-a313-d815865271f0-kube-api-access-wdltv\") pod \"horizon-85698bccdf-jvv85\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") " pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.523069 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f15d17d2-e8d6-49c8-a313-d815865271f0-logs\") pod \"horizon-85698bccdf-jvv85\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") " pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.523841 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f15d17d2-e8d6-49c8-a313-d815865271f0-logs\") pod \"horizon-85698bccdf-jvv85\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") " pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.524626 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f15d17d2-e8d6-49c8-a313-d815865271f0-scripts\") pod \"horizon-85698bccdf-jvv85\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") " pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.532560 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f15d17d2-e8d6-49c8-a313-d815865271f0-horizon-secret-key\") pod \"horizon-85698bccdf-jvv85\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") " pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.535227 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f15d17d2-e8d6-49c8-a313-d815865271f0-config-data\") pod \"horizon-85698bccdf-jvv85\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") " pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.553897 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5bbf599fb5-drlk7"] Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.554552 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdltv\" (UniqueName: \"kubernetes.io/projected/f15d17d2-e8d6-49c8-a313-d815865271f0-kube-api-access-wdltv\") pod \"horizon-85698bccdf-jvv85\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") " pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.581823 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-vkzk9"] Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.593985 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.652968 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 30 12:18:53 crc kubenswrapper[4703]: I0130 12:18:53.685953 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dswjd" 
event={"ID":"ba6892f1-1087-4650-b6fe-701fc48c94f7","Type":"ContainerStarted","Data":"5d4de2c96495b8e480e0ca04a0935c3e72b6eaf5f09e6e682c24a701d9048831"} Jan 30 12:18:54 crc kubenswrapper[4703]: I0130 12:18:54.042759 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-594746f8cc-b7spb" event={"ID":"6ea69f39-6623-4a6e-adff-6014bb7c749b","Type":"ContainerStarted","Data":"a890a73d141d07cd66317fd9f8a89cd5039e4040f5a063824abb086ee524cafc"} Jan 30 12:18:54 crc kubenswrapper[4703]: I0130 12:18:54.093249 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-dswjd" podStartSLOduration=6.093223434 podStartE2EDuration="6.093223434s" podCreationTimestamp="2026-01-30 12:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:18:54.083471591 +0000 UTC m=+1369.861293245" watchObservedRunningTime="2026-01-30 12:18:54.093223434 +0000 UTC m=+1369.871045088" Jan 30 12:18:54 crc kubenswrapper[4703]: I0130 12:18:54.118728 4703 generic.go:334] "Generic (PLEG): container finished" podID="caa1e059-1d8a-4297-8f2e-7be62ac2b1e2" containerID="d546b040c8a49fe237c0800c5cf80123c43a319974cd4f0d09d613769ce15da1" exitCode=0 Jan 30 12:18:54 crc kubenswrapper[4703]: I0130 12:18:54.118853 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" event={"ID":"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2","Type":"ContainerDied","Data":"d546b040c8a49fe237c0800c5cf80123c43a319974cd4f0d09d613769ce15da1"} Jan 30 12:18:54 crc kubenswrapper[4703]: I0130 12:18:54.118887 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" event={"ID":"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2","Type":"ContainerStarted","Data":"529cce648e419c610d8e32a8affdd6f7ff026ef74d68f313413f73320b5559f3"} Jan 30 12:18:54 crc kubenswrapper[4703]: I0130 12:18:54.136604 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-scrgs" event={"ID":"1665113c-fcaa-4a13-9de2-552579864e44","Type":"ContainerStarted","Data":"91a4c6344336e8c730ae40725c1fd99e5bd6410bb1a0b83bfb28844435442efc"} Jan 30 12:18:54 crc kubenswrapper[4703]: I0130 12:18:54.144431 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" event={"ID":"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84","Type":"ContainerStarted","Data":"237f7167f7136778eeb1feafe9ae884bdf8f50a35f08763339b9784e19e600ce"} Jan 30 12:18:54 crc kubenswrapper[4703]: I0130 12:18:54.152449 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-vkzk9" event={"ID":"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70","Type":"ContainerStarted","Data":"f36d6b4492ef8975f36878ebcb27e717feaec0146fd9720b3a0fbbbcbada8bea"} Jan 30 12:18:54 crc kubenswrapper[4703]: I0130 12:18:54.173558 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-95pfb" event={"ID":"bd8fdb9b-147d-4634-a312-719f1c62c7ff","Type":"ContainerStarted","Data":"6dcbea808c1e6ab36bb68a56e173af69dedb9e73ac24c2601a2deb7572065251"} Jan 30 12:18:54 crc kubenswrapper[4703]: I0130 12:18:54.173638 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-95pfb" event={"ID":"bd8fdb9b-147d-4634-a312-719f1c62c7ff","Type":"ContainerStarted","Data":"03c6baab368e9f1480617e8edba15643f71e8f35ae9be432e140a7f96788135e"} Jan 30 12:18:54 crc kubenswrapper[4703]: I0130 12:18:54.177652 4703 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"572a81eb-68df-470a-9ca8-1febfc6167ad","Type":"ContainerStarted","Data":"b05f4e7a82c3437a39d5b2966ed03267f9dce9ce85f5df36a02d00f2258df1aa"} Jan 30 12:18:54 crc kubenswrapper[4703]: I0130 12:18:54.197931 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:18:54 crc kubenswrapper[4703]: I0130 12:18:54.204198 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-mmqwb" event={"ID":"273a2195-27a6-4229-b3c0-8c67d9fc4302","Type":"ContainerStarted","Data":"627251f02791c2b24b276cbac63b5751958cfaa5e2ac6fd4e551bd54197fe205"} Jan 30 12:18:54 crc kubenswrapper[4703]: I0130 12:18:54.211086 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5bbf599fb5-drlk7" event={"ID":"2ac378ee-c96a-41cb-881b-0100056b27ad","Type":"ContainerStarted","Data":"4d06a5d6bf066d28fa37820403815da2077b5c8170ade5776487434b0130eb74"} Jan 30 12:18:54 crc kubenswrapper[4703]: I0130 12:18:54.245496 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-95pfb" podStartSLOduration=5.245471091 podStartE2EDuration="5.245471091s" podCreationTimestamp="2026-01-30 12:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:18:54.224086295 +0000 UTC m=+1370.001907949" watchObservedRunningTime="2026-01-30 12:18:54.245471091 +0000 UTC m=+1370.023292745" Jan 30 12:18:54 crc kubenswrapper[4703]: I0130 12:18:54.537795 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 30 12:18:54 crc kubenswrapper[4703]: I0130 12:18:54.925168 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.105841 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-dns-swift-storage-0\") pod \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.105928 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-ovsdbserver-sb\") pod \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.106028 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-ovsdbserver-nb\") pod \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.106048 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-config\") pod \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.106074 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrsxg\" (UniqueName: \"kubernetes.io/projected/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-kube-api-access-xrsxg\") pod \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.112628 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-dns-svc\") pod \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\" (UID: \"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2\") " Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.414729 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-kube-api-access-xrsxg" (OuterVolumeSpecName: "kube-api-access-xrsxg") pod "caa1e059-1d8a-4297-8f2e-7be62ac2b1e2" (UID: "caa1e059-1d8a-4297-8f2e-7be62ac2b1e2"). InnerVolumeSpecName "kube-api-access-xrsxg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.434362 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "caa1e059-1d8a-4297-8f2e-7be62ac2b1e2" (UID: "caa1e059-1d8a-4297-8f2e-7be62ac2b1e2"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.491553 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "caa1e059-1d8a-4297-8f2e-7be62ac2b1e2" (UID: "caa1e059-1d8a-4297-8f2e-7be62ac2b1e2"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.492003 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "caa1e059-1d8a-4297-8f2e-7be62ac2b1e2" (UID: "caa1e059-1d8a-4297-8f2e-7be62ac2b1e2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.494940 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "caa1e059-1d8a-4297-8f2e-7be62ac2b1e2" (UID: "caa1e059-1d8a-4297-8f2e-7be62ac2b1e2"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.514464 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2331b62f-b8c2-4a35-b7d9-debf6073d98d","Type":"ContainerStarted","Data":"aca6b451d4b89578c280f22d268b86dc5b41f136db3868b7f69403e84a26460a"} Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.514522 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-85698bccdf-jvv85"] Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.546302 4703 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.546347 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.546370 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.546380 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrsxg\" (UniqueName: \"kubernetes.io/projected/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-kube-api-access-xrsxg\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.546393 4703 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.589780 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7b2c7636-4ea7-4d61-9219-873ab28aa505","Type":"ContainerStarted","Data":"29f3626e7d57e86c28a7291d2cb497ebeb53972506c7425273d7b355d198fb59"} Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.633449 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-config" (OuterVolumeSpecName: "config") pod "caa1e059-1d8a-4297-8f2e-7be62ac2b1e2" (UID: "caa1e059-1d8a-4297-8f2e-7be62ac2b1e2"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.648615 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:18:55 crc kubenswrapper[4703]: W0130 12:18:55.656148 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf15d17d2_e8d6_49c8_a313_d815865271f0.slice/crio-c705c7a0f3a02f2118d4b0fa102e7bc7394b936a8c5c4cd901d4b4aa2bf61ce4 WatchSource:0}: Error finding container c705c7a0f3a02f2118d4b0fa102e7bc7394b936a8c5c4cd901d4b4aa2bf61ce4: Status 404 returned error can't find the container with id c705c7a0f3a02f2118d4b0fa102e7bc7394b936a8c5c4cd901d4b4aa2bf61ce4 Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.656591 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" event={"ID":"caa1e059-1d8a-4297-8f2e-7be62ac2b1e2","Type":"ContainerDied","Data":"529cce648e419c610d8e32a8affdd6f7ff026ef74d68f313413f73320b5559f3"} Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.656654 4703 scope.go:117] "RemoveContainer" containerID="d546b040c8a49fe237c0800c5cf80123c43a319974cd4f0d09d613769ce15da1" Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.656838 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-847c4cc679-k8mfj" Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.680064 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-vhshs" event={"ID":"cd3bf27b-46bc-468e-8735-3a3ed6eda272","Type":"ContainerStarted","Data":"1cf4f25cf82f53ff27b32327a97dd0d804b3013f6aecfdf31873c1098401e568"} Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.711899 4703 generic.go:334] "Generic (PLEG): container finished" podID="638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" containerID="8fe7b8bd4dc90f3274209a5000c44bb8dbe912ba13e8d4d28647303849c18237" exitCode=0 Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.712207 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" event={"ID":"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84","Type":"ContainerDied","Data":"8fe7b8bd4dc90f3274209a5000c44bb8dbe912ba13e8d4d28647303849c18237"} Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.936671 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-k8mfj"] Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.948907 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-847c4cc679-k8mfj"] Jan 30 12:18:55 crc kubenswrapper[4703]: I0130 12:18:55.950933 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-db-sync-vhshs" podStartSLOduration=7.186598445 podStartE2EDuration="43.950902243s" podCreationTimestamp="2026-01-30 12:18:12 +0000 UTC" firstStartedPulling="2026-01-30 12:18:16.214003275 +0000 UTC m=+1331.991824929" lastFinishedPulling="2026-01-30 12:18:52.978307073 +0000 UTC m=+1368.756128727" observedRunningTime="2026-01-30 12:18:55.932708593 +0000 UTC m=+1371.710530247" watchObservedRunningTime="2026-01-30 12:18:55.950902243 +0000 UTC m=+1371.728723897" Jan 30 12:18:56 crc kubenswrapper[4703]: I0130 12:18:56.796931 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-74f6bcbc87-6rwpn" 
podUID="b2a66906-bc0b-419a-9304-6f623d26d9d4" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.145:5353: i/o timeout" Jan 30 12:18:56 crc kubenswrapper[4703]: I0130 12:18:56.903282 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" event={"ID":"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84","Type":"ContainerStarted","Data":"4724661b321f6ce5f753731575ce55e816165e7242955e0a3f3e31646fd199b6"} Jan 30 12:18:56 crc kubenswrapper[4703]: I0130 12:18:56.905794 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7b2c7636-4ea7-4d61-9219-873ab28aa505","Type":"ContainerStarted","Data":"bd745ea6c869be409bdaa402f9e724e868a803788204b03ca22ab690204d1db5"} Jan 30 12:18:56 crc kubenswrapper[4703]: I0130 12:18:56.969417 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85698bccdf-jvv85" event={"ID":"f15d17d2-e8d6-49c8-a313-d815865271f0","Type":"ContainerStarted","Data":"c705c7a0f3a02f2118d4b0fa102e7bc7394b936a8c5c4cd901d4b4aa2bf61ce4"} Jan 30 12:18:57 crc kubenswrapper[4703]: I0130 12:18:57.231396 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="caa1e059-1d8a-4297-8f2e-7be62ac2b1e2" path="/var/lib/kubelet/pods/caa1e059-1d8a-4297-8f2e-7be62ac2b1e2/volumes" Jan 30 12:18:58 crc kubenswrapper[4703]: I0130 12:18:58.022898 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2331b62f-b8c2-4a35-b7d9-debf6073d98d","Type":"ContainerStarted","Data":"a5deef2cba61430c89a34c4bd9c1840d91254e070c680c12329c9ef8981e767b"} Jan 30 12:18:58 crc kubenswrapper[4703]: I0130 12:18:58.022984 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:18:58 crc kubenswrapper[4703]: I0130 12:18:58.056614 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" podStartSLOduration=8.056576465 podStartE2EDuration="8.056576465s" podCreationTimestamp="2026-01-30 12:18:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:18:58.044603952 +0000 UTC m=+1373.822425606" watchObservedRunningTime="2026-01-30 12:18:58.056576465 +0000 UTC m=+1373.834398119" Jan 30 12:18:59 crc kubenswrapper[4703]: I0130 12:18:59.813165 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5bbf599fb5-drlk7"] Jan 30 12:18:59 crc kubenswrapper[4703]: I0130 12:18:59.850732 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5f9958979d-8h859"] Jan 30 12:18:59 crc kubenswrapper[4703]: E0130 12:18:59.852737 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caa1e059-1d8a-4297-8f2e-7be62ac2b1e2" containerName="init" Jan 30 12:18:59 crc kubenswrapper[4703]: I0130 12:18:59.852774 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="caa1e059-1d8a-4297-8f2e-7be62ac2b1e2" containerName="init" Jan 30 12:18:59 crc kubenswrapper[4703]: I0130 12:18:59.853363 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="caa1e059-1d8a-4297-8f2e-7be62ac2b1e2" containerName="init" Jan 30 12:18:59 crc kubenswrapper[4703]: I0130 12:18:59.858489 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:18:59 crc kubenswrapper[4703]: I0130 12:18:59.863199 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Jan 30 12:18:59 crc kubenswrapper[4703]: I0130 12:18:59.891859 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5f9958979d-8h859"] Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.278872 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b888ea51-970d-4f4d-9e5c-f456ca173472-logs\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.278920 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b888ea51-970d-4f4d-9e5c-f456ca173472-combined-ca-bundle\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.279021 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b888ea51-970d-4f4d-9e5c-f456ca173472-horizon-tls-certs\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.279063 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b888ea51-970d-4f4d-9e5c-f456ca173472-config-data\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.279089 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b888ea51-970d-4f4d-9e5c-f456ca173472-scripts\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.279110 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b888ea51-970d-4f4d-9e5c-f456ca173472-horizon-secret-key\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.279154 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpxgw\" (UniqueName: \"kubernetes.io/projected/b888ea51-970d-4f4d-9e5c-f456ca173472-kube-api-access-bpxgw\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.360207 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-85698bccdf-jvv85"] Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.381371 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/b888ea51-970d-4f4d-9e5c-f456ca173472-horizon-tls-certs\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.381483 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b888ea51-970d-4f4d-9e5c-f456ca173472-config-data\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.381544 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b888ea51-970d-4f4d-9e5c-f456ca173472-scripts\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.381577 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b888ea51-970d-4f4d-9e5c-f456ca173472-horizon-secret-key\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.381631 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bpxgw\" (UniqueName: \"kubernetes.io/projected/b888ea51-970d-4f4d-9e5c-f456ca173472-kube-api-access-bpxgw\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.381701 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b888ea51-970d-4f4d-9e5c-f456ca173472-logs\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.381739 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b888ea51-970d-4f4d-9e5c-f456ca173472-combined-ca-bundle\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.383170 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b888ea51-970d-4f4d-9e5c-f456ca173472-scripts\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.384233 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b888ea51-970d-4f4d-9e5c-f456ca173472-config-data\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.385469 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b888ea51-970d-4f4d-9e5c-f456ca173472-logs\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" 
Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.392201 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b888ea51-970d-4f4d-9e5c-f456ca173472-horizon-secret-key\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.400626 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b888ea51-970d-4f4d-9e5c-f456ca173472-horizon-tls-certs\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.417107 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b888ea51-970d-4f4d-9e5c-f456ca173472-combined-ca-bundle\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.430105 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-77fb4cf9b8-pw692"] Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.435366 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.454495 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-77fb4cf9b8-pw692"] Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.468376 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bpxgw\" (UniqueName: \"kubernetes.io/projected/b888ea51-970d-4f4d-9e5c-f456ca173472-kube-api-access-bpxgw\") pod \"horizon-5f9958979d-8h859\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.483803 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-scripts\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.483879 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-logs\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.483938 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7p6cf\" (UniqueName: \"kubernetes.io/projected/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-kube-api-access-7p6cf\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.484069 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-horizon-secret-key\") pod \"horizon-77fb4cf9b8-pw692\" (UID: 
\"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.484240 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-combined-ca-bundle\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.484302 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-config-data\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.484326 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-horizon-tls-certs\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.499584 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.587835 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-config-data\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.587911 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-horizon-tls-certs\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.587999 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-scripts\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.588024 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-logs\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.588065 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7p6cf\" (UniqueName: \"kubernetes.io/projected/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-kube-api-access-7p6cf\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.588213 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: 
\"kubernetes.io/secret/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-horizon-secret-key\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.588259 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-combined-ca-bundle\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.590282 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-scripts\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.591310 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-config-data\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.592728 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-logs\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.594870 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-horizon-secret-key\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.613767 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-combined-ca-bundle\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.615325 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-horizon-tls-certs\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.624067 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7p6cf\" (UniqueName: \"kubernetes.io/projected/9c6d3262-7469-45ac-b5c8-9eb0f9456a5a-kube-api-access-7p6cf\") pod \"horizon-77fb4cf9b8-pw692\" (UID: \"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a\") " pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:00 crc kubenswrapper[4703]: I0130 12:19:00.877735 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:01 crc kubenswrapper[4703]: I0130 12:19:01.346539 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7b2c7636-4ea7-4d61-9219-873ab28aa505","Type":"ContainerStarted","Data":"e128946526eb2175075f1a0601ec019e014a1687950b6228c2e56f2911741391"} Jan 30 12:19:01 crc kubenswrapper[4703]: I0130 12:19:01.350116 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2331b62f-b8c2-4a35-b7d9-debf6073d98d","Type":"ContainerStarted","Data":"1c49598ce207ab5e2b91b7d3e1ff49344bfe38d72a42aa59b7368f91110d0b0d"} Jan 30 12:19:01 crc kubenswrapper[4703]: I0130 12:19:01.656256 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:19:01 crc kubenswrapper[4703]: I0130 12:19:01.741557 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-hjndc"] Jan 30 12:19:01 crc kubenswrapper[4703]: I0130 12:19:01.742247 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-764c5664d7-hjndc" podUID="bea2808f-b06b-4388-865f-9cac9ca53857" containerName="dnsmasq-dns" containerID="cri-o://2afe21d8bff3648d5aadae798fd8612bf1d4bd06128b70afbd57129731c29508" gracePeriod=10 Jan 30 12:19:02 crc kubenswrapper[4703]: I0130 12:19:02.671930 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="7b2c7636-4ea7-4d61-9219-873ab28aa505" containerName="glance-log" containerID="cri-o://bd745ea6c869be409bdaa402f9e724e868a803788204b03ca22ab690204d1db5" gracePeriod=30 Jan 30 12:19:02 crc kubenswrapper[4703]: I0130 12:19:02.672490 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="7b2c7636-4ea7-4d61-9219-873ab28aa505" containerName="glance-httpd" containerID="cri-o://e128946526eb2175075f1a0601ec019e014a1687950b6228c2e56f2911741391" gracePeriod=30 Jan 30 12:19:02 crc kubenswrapper[4703]: I0130 12:19:02.709622 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=12.709595024 podStartE2EDuration="12.709595024s" podCreationTimestamp="2026-01-30 12:18:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:19:02.709519962 +0000 UTC m=+1378.487341616" watchObservedRunningTime="2026-01-30 12:19:02.709595024 +0000 UTC m=+1378.487416688" Jan 30 12:19:02 crc kubenswrapper[4703]: E0130 12:19:02.949653 4703 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b2c7636_4ea7_4d61_9219_873ab28aa505.slice/crio-e128946526eb2175075f1a0601ec019e014a1687950b6228c2e56f2911741391.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b2c7636_4ea7_4d61_9219_873ab28aa505.slice/crio-conmon-bd745ea6c869be409bdaa402f9e724e868a803788204b03ca22ab690204d1db5.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b2c7636_4ea7_4d61_9219_873ab28aa505.slice/crio-bd745ea6c869be409bdaa402f9e724e868a803788204b03ca22ab690204d1db5.scope\": 
RecentStats: unable to find data in memory cache]" Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.485357 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.590243 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-dns-swift-storage-0\") pod \"bea2808f-b06b-4388-865f-9cac9ca53857\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.592439 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-ovsdbserver-sb\") pod \"bea2808f-b06b-4388-865f-9cac9ca53857\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.592499 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t4zgz\" (UniqueName: \"kubernetes.io/projected/bea2808f-b06b-4388-865f-9cac9ca53857-kube-api-access-t4zgz\") pod \"bea2808f-b06b-4388-865f-9cac9ca53857\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.592540 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-config\") pod \"bea2808f-b06b-4388-865f-9cac9ca53857\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.592704 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-dns-svc\") pod \"bea2808f-b06b-4388-865f-9cac9ca53857\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.592738 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-ovsdbserver-nb\") pod \"bea2808f-b06b-4388-865f-9cac9ca53857\" (UID: \"bea2808f-b06b-4388-865f-9cac9ca53857\") " Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.599806 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bea2808f-b06b-4388-865f-9cac9ca53857-kube-api-access-t4zgz" (OuterVolumeSpecName: "kube-api-access-t4zgz") pod "bea2808f-b06b-4388-865f-9cac9ca53857" (UID: "bea2808f-b06b-4388-865f-9cac9ca53857"). InnerVolumeSpecName "kube-api-access-t4zgz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.655338 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-config" (OuterVolumeSpecName: "config") pod "bea2808f-b06b-4388-865f-9cac9ca53857" (UID: "bea2808f-b06b-4388-865f-9cac9ca53857"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.656817 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bea2808f-b06b-4388-865f-9cac9ca53857" (UID: "bea2808f-b06b-4388-865f-9cac9ca53857"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.659392 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "bea2808f-b06b-4388-865f-9cac9ca53857" (UID: "bea2808f-b06b-4388-865f-9cac9ca53857"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.672042 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bea2808f-b06b-4388-865f-9cac9ca53857" (UID: "bea2808f-b06b-4388-865f-9cac9ca53857"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.676765 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bea2808f-b06b-4388-865f-9cac9ca53857" (UID: "bea2808f-b06b-4388-865f-9cac9ca53857"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.695934 4703 generic.go:334] "Generic (PLEG): container finished" podID="bea2808f-b06b-4388-865f-9cac9ca53857" containerID="2afe21d8bff3648d5aadae798fd8612bf1d4bd06128b70afbd57129731c29508" exitCode=0 Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.696194 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-hjndc" event={"ID":"bea2808f-b06b-4388-865f-9cac9ca53857","Type":"ContainerDied","Data":"2afe21d8bff3648d5aadae798fd8612bf1d4bd06128b70afbd57129731c29508"} Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.696350 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-764c5664d7-hjndc" Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.696544 4703 scope.go:117] "RemoveContainer" containerID="2afe21d8bff3648d5aadae798fd8612bf1d4bd06128b70afbd57129731c29508" Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.696526 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764c5664d7-hjndc" event={"ID":"bea2808f-b06b-4388-865f-9cac9ca53857","Type":"ContainerDied","Data":"c48d230221ff732c55713b4c37c1e074b3d37a47e08d3a1ef0ea885528c934cf"} Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.707619 4703 generic.go:334] "Generic (PLEG): container finished" podID="7b2c7636-4ea7-4d61-9219-873ab28aa505" containerID="e128946526eb2175075f1a0601ec019e014a1687950b6228c2e56f2911741391" exitCode=143 Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.707649 4703 generic.go:334] "Generic (PLEG): container finished" podID="7b2c7636-4ea7-4d61-9219-873ab28aa505" containerID="bd745ea6c869be409bdaa402f9e724e868a803788204b03ca22ab690204d1db5" exitCode=143 Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.707679 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7b2c7636-4ea7-4d61-9219-873ab28aa505","Type":"ContainerDied","Data":"e128946526eb2175075f1a0601ec019e014a1687950b6228c2e56f2911741391"} Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.710468 4703 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.710516 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.710535 4703 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.710551 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.710565 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t4zgz\" (UniqueName: \"kubernetes.io/projected/bea2808f-b06b-4388-865f-9cac9ca53857-kube-api-access-t4zgz\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.710581 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bea2808f-b06b-4388-865f-9cac9ca53857-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.710673 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7b2c7636-4ea7-4d61-9219-873ab28aa505","Type":"ContainerDied","Data":"bd745ea6c869be409bdaa402f9e724e868a803788204b03ca22ab690204d1db5"} Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.711882 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="2331b62f-b8c2-4a35-b7d9-debf6073d98d" 
containerName="glance-log" containerID="cri-o://a5deef2cba61430c89a34c4bd9c1840d91254e070c680c12329c9ef8981e767b" gracePeriod=30 Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.711977 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="2331b62f-b8c2-4a35-b7d9-debf6073d98d" containerName="glance-httpd" containerID="cri-o://1c49598ce207ab5e2b91b7d3e1ff49344bfe38d72a42aa59b7368f91110d0b0d" gracePeriod=30 Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.780676 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=13.780637385 podStartE2EDuration="13.780637385s" podCreationTimestamp="2026-01-30 12:18:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:19:03.748758527 +0000 UTC m=+1379.526580211" watchObservedRunningTime="2026-01-30 12:19:03.780637385 +0000 UTC m=+1379.558459039" Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.834118 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-hjndc"] Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.856637 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-764c5664d7-hjndc"] Jan 30 12:19:03 crc kubenswrapper[4703]: I0130 12:19:03.866442 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5f9958979d-8h859"] Jan 30 12:19:04 crc kubenswrapper[4703]: I0130 12:19:04.107381 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-77fb4cf9b8-pw692"] Jan 30 12:19:04 crc kubenswrapper[4703]: I0130 12:19:04.725811 4703 generic.go:334] "Generic (PLEG): container finished" podID="2331b62f-b8c2-4a35-b7d9-debf6073d98d" containerID="1c49598ce207ab5e2b91b7d3e1ff49344bfe38d72a42aa59b7368f91110d0b0d" exitCode=0 Jan 30 12:19:04 crc kubenswrapper[4703]: I0130 12:19:04.726242 4703 generic.go:334] "Generic (PLEG): container finished" podID="2331b62f-b8c2-4a35-b7d9-debf6073d98d" containerID="a5deef2cba61430c89a34c4bd9c1840d91254e070c680c12329c9ef8981e767b" exitCode=143 Jan 30 12:19:04 crc kubenswrapper[4703]: I0130 12:19:04.725909 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2331b62f-b8c2-4a35-b7d9-debf6073d98d","Type":"ContainerDied","Data":"1c49598ce207ab5e2b91b7d3e1ff49344bfe38d72a42aa59b7368f91110d0b0d"} Jan 30 12:19:04 crc kubenswrapper[4703]: I0130 12:19:04.726298 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2331b62f-b8c2-4a35-b7d9-debf6073d98d","Type":"ContainerDied","Data":"a5deef2cba61430c89a34c4bd9c1840d91254e070c680c12329c9ef8981e767b"} Jan 30 12:19:05 crc kubenswrapper[4703]: I0130 12:19:05.105398 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bea2808f-b06b-4388-865f-9cac9ca53857" path="/var/lib/kubelet/pods/bea2808f-b06b-4388-865f-9cac9ca53857/volumes" Jan 30 12:19:05 crc kubenswrapper[4703]: I0130 12:19:05.760662 4703 generic.go:334] "Generic (PLEG): container finished" podID="ba6892f1-1087-4650-b6fe-701fc48c94f7" containerID="5d4de2c96495b8e480e0ca04a0935c3e72b6eaf5f09e6e682c24a701d9048831" exitCode=0 Jan 30 12:19:05 crc kubenswrapper[4703]: I0130 12:19:05.760847 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dswjd" 
event={"ID":"ba6892f1-1087-4650-b6fe-701fc48c94f7","Type":"ContainerDied","Data":"5d4de2c96495b8e480e0ca04a0935c3e72b6eaf5f09e6e682c24a701d9048831"} Jan 30 12:19:05 crc kubenswrapper[4703]: I0130 12:19:05.764873 4703 generic.go:334] "Generic (PLEG): container finished" podID="cd3bf27b-46bc-468e-8735-3a3ed6eda272" containerID="1cf4f25cf82f53ff27b32327a97dd0d804b3013f6aecfdf31873c1098401e568" exitCode=0 Jan 30 12:19:05 crc kubenswrapper[4703]: I0130 12:19:05.764919 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-vhshs" event={"ID":"cd3bf27b-46bc-468e-8735-3a3ed6eda272","Type":"ContainerDied","Data":"1cf4f25cf82f53ff27b32327a97dd0d804b3013f6aecfdf31873c1098401e568"} Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.388273 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-vhshs" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.397987 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.537792 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b2c7636-4ea7-4d61-9219-873ab28aa505-logs\") pod \"7b2c7636-4ea7-4d61-9219-873ab28aa505\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.538282 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7b2c7636-4ea7-4d61-9219-873ab28aa505-httpd-run\") pod \"7b2c7636-4ea7-4d61-9219-873ab28aa505\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.538531 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd3bf27b-46bc-468e-8735-3a3ed6eda272-config-data\") pod \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\" (UID: \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\") " Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.538566 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-combined-ca-bundle\") pod \"7b2c7636-4ea7-4d61-9219-873ab28aa505\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.538599 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-config-data\") pod \"7b2c7636-4ea7-4d61-9219-873ab28aa505\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.538623 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-internal-tls-certs\") pod \"7b2c7636-4ea7-4d61-9219-873ab28aa505\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.538648 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cd3bf27b-46bc-468e-8735-3a3ed6eda272-db-sync-config-data\") pod \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\" (UID: \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\") " Jan 30 12:19:12 crc 
kubenswrapper[4703]: I0130 12:19:12.538705 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x94qw\" (UniqueName: \"kubernetes.io/projected/7b2c7636-4ea7-4d61-9219-873ab28aa505-kube-api-access-x94qw\") pod \"7b2c7636-4ea7-4d61-9219-873ab28aa505\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.538750 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd3bf27b-46bc-468e-8735-3a3ed6eda272-combined-ca-bundle\") pod \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\" (UID: \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\") " Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.538788 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-scripts\") pod \"7b2c7636-4ea7-4d61-9219-873ab28aa505\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.538822 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b5jdv\" (UniqueName: \"kubernetes.io/projected/cd3bf27b-46bc-468e-8735-3a3ed6eda272-kube-api-access-b5jdv\") pod \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\" (UID: \"cd3bf27b-46bc-468e-8735-3a3ed6eda272\") " Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.538848 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"7b2c7636-4ea7-4d61-9219-873ab28aa505\" (UID: \"7b2c7636-4ea7-4d61-9219-873ab28aa505\") " Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.538912 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b2c7636-4ea7-4d61-9219-873ab28aa505-logs" (OuterVolumeSpecName: "logs") pod "7b2c7636-4ea7-4d61-9219-873ab28aa505" (UID: "7b2c7636-4ea7-4d61-9219-873ab28aa505"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.539393 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7b2c7636-4ea7-4d61-9219-873ab28aa505-logs\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.539836 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b2c7636-4ea7-4d61-9219-873ab28aa505-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "7b2c7636-4ea7-4d61-9219-873ab28aa505" (UID: "7b2c7636-4ea7-4d61-9219-873ab28aa505"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.547628 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b2c7636-4ea7-4d61-9219-873ab28aa505-kube-api-access-x94qw" (OuterVolumeSpecName: "kube-api-access-x94qw") pod "7b2c7636-4ea7-4d61-9219-873ab28aa505" (UID: "7b2c7636-4ea7-4d61-9219-873ab28aa505"). InnerVolumeSpecName "kube-api-access-x94qw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.549483 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "7b2c7636-4ea7-4d61-9219-873ab28aa505" (UID: "7b2c7636-4ea7-4d61-9219-873ab28aa505"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.549599 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-scripts" (OuterVolumeSpecName: "scripts") pod "7b2c7636-4ea7-4d61-9219-873ab28aa505" (UID: "7b2c7636-4ea7-4d61-9219-873ab28aa505"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.549770 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd3bf27b-46bc-468e-8735-3a3ed6eda272-kube-api-access-b5jdv" (OuterVolumeSpecName: "kube-api-access-b5jdv") pod "cd3bf27b-46bc-468e-8735-3a3ed6eda272" (UID: "cd3bf27b-46bc-468e-8735-3a3ed6eda272"). InnerVolumeSpecName "kube-api-access-b5jdv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.554301 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd3bf27b-46bc-468e-8735-3a3ed6eda272-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "cd3bf27b-46bc-468e-8735-3a3ed6eda272" (UID: "cd3bf27b-46bc-468e-8735-3a3ed6eda272"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.581739 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7b2c7636-4ea7-4d61-9219-873ab28aa505" (UID: "7b2c7636-4ea7-4d61-9219-873ab28aa505"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.588262 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd3bf27b-46bc-468e-8735-3a3ed6eda272-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cd3bf27b-46bc-468e-8735-3a3ed6eda272" (UID: "cd3bf27b-46bc-468e-8735-3a3ed6eda272"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.605390 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "7b2c7636-4ea7-4d61-9219-873ab28aa505" (UID: "7b2c7636-4ea7-4d61-9219-873ab28aa505"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.611807 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd3bf27b-46bc-468e-8735-3a3ed6eda272-config-data" (OuterVolumeSpecName: "config-data") pod "cd3bf27b-46bc-468e-8735-3a3ed6eda272" (UID: "cd3bf27b-46bc-468e-8735-3a3ed6eda272"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.635942 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-config-data" (OuterVolumeSpecName: "config-data") pod "7b2c7636-4ea7-4d61-9219-873ab28aa505" (UID: "7b2c7636-4ea7-4d61-9219-873ab28aa505"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.641871 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd3bf27b-46bc-468e-8735-3a3ed6eda272-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.642145 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.642218 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.642274 4703 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.642358 4703 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/cd3bf27b-46bc-468e-8735-3a3ed6eda272-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.642416 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x94qw\" (UniqueName: \"kubernetes.io/projected/7b2c7636-4ea7-4d61-9219-873ab28aa505-kube-api-access-x94qw\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.642471 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd3bf27b-46bc-468e-8735-3a3ed6eda272-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.642547 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7b2c7636-4ea7-4d61-9219-873ab28aa505-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.642605 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b5jdv\" (UniqueName: \"kubernetes.io/projected/cd3bf27b-46bc-468e-8735-3a3ed6eda272-kube-api-access-b5jdv\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.642721 4703 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.642807 4703 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7b2c7636-4ea7-4d61-9219-873ab28aa505-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.676941 4703 operation_generator.go:917] UnmountDevice succeeded for volume 
"local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Jan 30 12:19:12 crc kubenswrapper[4703]: I0130 12:19:12.744681 4703 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.060692 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-db-sync-vhshs" event={"ID":"cd3bf27b-46bc-468e-8735-3a3ed6eda272","Type":"ContainerDied","Data":"3732b114ad4c2bda7860ef9f2f8057f29fff0cb8e46d0eb1d9e10ece925eb8ff"} Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.060747 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3732b114ad4c2bda7860ef9f2f8057f29fff0cb8e46d0eb1d9e10ece925eb8ff" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.060850 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-db-sync-vhshs" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.065755 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"7b2c7636-4ea7-4d61-9219-873ab28aa505","Type":"ContainerDied","Data":"29f3626e7d57e86c28a7291d2cb497ebeb53972506c7425273d7b355d198fb59"} Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.065871 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.135977 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.147428 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.172891 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 30 12:19:13 crc kubenswrapper[4703]: E0130 12:19:13.174006 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd3bf27b-46bc-468e-8735-3a3ed6eda272" containerName="watcher-db-sync" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.174249 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd3bf27b-46bc-468e-8735-3a3ed6eda272" containerName="watcher-db-sync" Jan 30 12:19:13 crc kubenswrapper[4703]: E0130 12:19:13.174329 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bea2808f-b06b-4388-865f-9cac9ca53857" containerName="init" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.174385 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="bea2808f-b06b-4388-865f-9cac9ca53857" containerName="init" Jan 30 12:19:13 crc kubenswrapper[4703]: E0130 12:19:13.174443 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bea2808f-b06b-4388-865f-9cac9ca53857" containerName="dnsmasq-dns" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.174522 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="bea2808f-b06b-4388-865f-9cac9ca53857" containerName="dnsmasq-dns" Jan 30 12:19:13 crc kubenswrapper[4703]: E0130 12:19:13.174605 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b2c7636-4ea7-4d61-9219-873ab28aa505" containerName="glance-httpd" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.174667 4703 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="7b2c7636-4ea7-4d61-9219-873ab28aa505" containerName="glance-httpd" Jan 30 12:19:13 crc kubenswrapper[4703]: E0130 12:19:13.174763 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b2c7636-4ea7-4d61-9219-873ab28aa505" containerName="glance-log" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.175025 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b2c7636-4ea7-4d61-9219-873ab28aa505" containerName="glance-log" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.175468 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b2c7636-4ea7-4d61-9219-873ab28aa505" containerName="glance-httpd" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.175741 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="bea2808f-b06b-4388-865f-9cac9ca53857" containerName="dnsmasq-dns" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.175829 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd3bf27b-46bc-468e-8735-3a3ed6eda272" containerName="watcher-db-sync" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.175959 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b2c7636-4ea7-4d61-9219-873ab28aa505" containerName="glance-log" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.177740 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.184998 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.188723 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.192704 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 30 12:19:13 crc kubenswrapper[4703]: E0130 12:19:13.354039 4703 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b2c7636_4ea7_4d61_9219_873ab28aa505.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcd3bf27b_46bc_468e_8735_3a3ed6eda272.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7b2c7636_4ea7_4d61_9219_873ab28aa505.slice/crio-29f3626e7d57e86c28a7291d2cb497ebeb53972506c7425273d7b355d198fb59\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcd3bf27b_46bc_468e_8735_3a3ed6eda272.slice/crio-3732b114ad4c2bda7860ef9f2f8057f29fff0cb8e46d0eb1d9e10ece925eb8ff\": RecentStats: unable to find data in memory cache]" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.367849 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e06db1b-bded-42ad-bd1e-8f7d76faf399-logs\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.368295 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/2e06db1b-bded-42ad-bd1e-8f7d76faf399-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.368422 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4jq5\" (UniqueName: \"kubernetes.io/projected/2e06db1b-bded-42ad-bd1e-8f7d76faf399-kube-api-access-q4jq5\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.368520 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.368675 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.368924 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.369048 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.369222 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.477460 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.477660 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.477982 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume 
\"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.485518 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e06db1b-bded-42ad-bd1e-8f7d76faf399-logs\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.485600 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2e06db1b-bded-42ad-bd1e-8f7d76faf399-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.485637 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4jq5\" (UniqueName: \"kubernetes.io/projected/2e06db1b-bded-42ad-bd1e-8f7d76faf399-kube-api-access-q4jq5\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.485664 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.485795 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.486080 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.486874 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2e06db1b-bded-42ad-bd1e-8f7d76faf399-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.487279 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e06db1b-bded-42ad-bd1e-8f7d76faf399-logs\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.496735 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.498377 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.503355 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.519421 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.524504 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4jq5\" (UniqueName: \"kubernetes.io/projected/2e06db1b-bded-42ad-bd1e-8f7d76faf399-kube-api-access-q4jq5\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.543644 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") " pod="openstack/glance-default-internal-api-0" Jan 30 12:19:13 crc kubenswrapper[4703]: I0130 12:19:13.608896 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.058915 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.071246 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.077931 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-watcher-dockercfg-tt2z5" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.078066 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.110505 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.150367 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.152069 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.160734 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.211811 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-applier-0"] Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.214010 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-applier-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.217779 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-applier-config-data" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.227447 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41238b51-342b-4a59-b098-01f716f7a865-config-data\") pod \"watcher-api-0\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " pod="openstack/watcher-api-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.227514 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41238b51-342b-4a59-b098-01f716f7a865-logs\") pod \"watcher-api-0\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " pod="openstack/watcher-api-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.227546 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5msjb\" (UniqueName: \"kubernetes.io/projected/41238b51-342b-4a59-b098-01f716f7a865-kube-api-access-5msjb\") pod \"watcher-api-0\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " pod="openstack/watcher-api-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.228207 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/41238b51-342b-4a59-b098-01f716f7a865-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " pod="openstack/watcher-api-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.231929 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.234795 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41238b51-342b-4a59-b098-01f716f7a865-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " pod="openstack/watcher-api-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.248942 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.337641 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26df44bd-bd05-4ed3-b146-fa1111db982e-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.337762 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: 
\"kubernetes.io/secret/26df44bd-bd05-4ed3-b146-fa1111db982e-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.337848 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76d9l\" (UniqueName: \"kubernetes.io/projected/26df44bd-bd05-4ed3-b146-fa1111db982e-kube-api-access-76d9l\") pod \"watcher-decision-engine-0\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.337910 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/41238b51-342b-4a59-b098-01f716f7a865-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " pod="openstack/watcher-api-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.339186 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/78484d4c-53f4-4790-965f-2b8c22d7a9ce-logs\") pod \"watcher-applier-0\" (UID: \"78484d4c-53f4-4790-965f-2b8c22d7a9ce\") " pod="openstack/watcher-applier-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.339271 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78484d4c-53f4-4790-965f-2b8c22d7a9ce-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"78484d4c-53f4-4790-965f-2b8c22d7a9ce\") " pod="openstack/watcher-applier-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.339298 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41238b51-342b-4a59-b098-01f716f7a865-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " pod="openstack/watcher-api-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.339403 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26df44bd-bd05-4ed3-b146-fa1111db982e-config-data\") pod \"watcher-decision-engine-0\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.339562 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78484d4c-53f4-4790-965f-2b8c22d7a9ce-config-data\") pod \"watcher-applier-0\" (UID: \"78484d4c-53f4-4790-965f-2b8c22d7a9ce\") " pod="openstack/watcher-applier-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.339583 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26df44bd-bd05-4ed3-b146-fa1111db982e-logs\") pod \"watcher-decision-engine-0\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.339626 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41238b51-342b-4a59-b098-01f716f7a865-config-data\") pod \"watcher-api-0\" (UID: 
\"41238b51-342b-4a59-b098-01f716f7a865\") " pod="openstack/watcher-api-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.340048 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41238b51-342b-4a59-b098-01f716f7a865-logs\") pod \"watcher-api-0\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " pod="openstack/watcher-api-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.340105 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5msjb\" (UniqueName: \"kubernetes.io/projected/41238b51-342b-4a59-b098-01f716f7a865-kube-api-access-5msjb\") pod \"watcher-api-0\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " pod="openstack/watcher-api-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.340176 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhxzn\" (UniqueName: \"kubernetes.io/projected/78484d4c-53f4-4790-965f-2b8c22d7a9ce-kube-api-access-hhxzn\") pod \"watcher-applier-0\" (UID: \"78484d4c-53f4-4790-965f-2b8c22d7a9ce\") " pod="openstack/watcher-applier-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.340888 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41238b51-342b-4a59-b098-01f716f7a865-logs\") pod \"watcher-api-0\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " pod="openstack/watcher-api-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.345957 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/41238b51-342b-4a59-b098-01f716f7a865-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " pod="openstack/watcher-api-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.346351 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41238b51-342b-4a59-b098-01f716f7a865-config-data\") pod \"watcher-api-0\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " pod="openstack/watcher-api-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.347084 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41238b51-342b-4a59-b098-01f716f7a865-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " pod="openstack/watcher-api-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.368160 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5msjb\" (UniqueName: \"kubernetes.io/projected/41238b51-342b-4a59-b098-01f716f7a865-kube-api-access-5msjb\") pod \"watcher-api-0\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " pod="openstack/watcher-api-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.425247 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.442833 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26df44bd-bd05-4ed3-b146-fa1111db982e-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.442925 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/26df44bd-bd05-4ed3-b146-fa1111db982e-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.442967 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76d9l\" (UniqueName: \"kubernetes.io/projected/26df44bd-bd05-4ed3-b146-fa1111db982e-kube-api-access-76d9l\") pod \"watcher-decision-engine-0\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.443035 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/78484d4c-53f4-4790-965f-2b8c22d7a9ce-logs\") pod \"watcher-applier-0\" (UID: \"78484d4c-53f4-4790-965f-2b8c22d7a9ce\") " pod="openstack/watcher-applier-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.443077 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78484d4c-53f4-4790-965f-2b8c22d7a9ce-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"78484d4c-53f4-4790-965f-2b8c22d7a9ce\") " pod="openstack/watcher-applier-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.443144 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26df44bd-bd05-4ed3-b146-fa1111db982e-config-data\") pod \"watcher-decision-engine-0\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.443193 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78484d4c-53f4-4790-965f-2b8c22d7a9ce-config-data\") pod \"watcher-applier-0\" (UID: \"78484d4c-53f4-4790-965f-2b8c22d7a9ce\") " pod="openstack/watcher-applier-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.443209 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26df44bd-bd05-4ed3-b146-fa1111db982e-logs\") pod \"watcher-decision-engine-0\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.443252 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhxzn\" (UniqueName: \"kubernetes.io/projected/78484d4c-53f4-4790-965f-2b8c22d7a9ce-kube-api-access-hhxzn\") pod \"watcher-applier-0\" (UID: \"78484d4c-53f4-4790-965f-2b8c22d7a9ce\") " pod="openstack/watcher-applier-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.444320 4703 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26df44bd-bd05-4ed3-b146-fa1111db982e-logs\") pod \"watcher-decision-engine-0\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.447057 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26df44bd-bd05-4ed3-b146-fa1111db982e-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.447649 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/78484d4c-53f4-4790-965f-2b8c22d7a9ce-logs\") pod \"watcher-applier-0\" (UID: \"78484d4c-53f4-4790-965f-2b8c22d7a9ce\") " pod="openstack/watcher-applier-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.448394 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78484d4c-53f4-4790-965f-2b8c22d7a9ce-combined-ca-bundle\") pod \"watcher-applier-0\" (UID: \"78484d4c-53f4-4790-965f-2b8c22d7a9ce\") " pod="openstack/watcher-applier-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.453258 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78484d4c-53f4-4790-965f-2b8c22d7a9ce-config-data\") pod \"watcher-applier-0\" (UID: \"78484d4c-53f4-4790-965f-2b8c22d7a9ce\") " pod="openstack/watcher-applier-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.453481 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/26df44bd-bd05-4ed3-b146-fa1111db982e-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.460795 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26df44bd-bd05-4ed3-b146-fa1111db982e-config-data\") pod \"watcher-decision-engine-0\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.469146 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhxzn\" (UniqueName: \"kubernetes.io/projected/78484d4c-53f4-4790-965f-2b8c22d7a9ce-kube-api-access-hhxzn\") pod \"watcher-applier-0\" (UID: \"78484d4c-53f4-4790-965f-2b8c22d7a9ce\") " pod="openstack/watcher-applier-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.471485 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76d9l\" (UniqueName: \"kubernetes.io/projected/26df44bd-bd05-4ed3-b146-fa1111db982e-kube-api-access-76d9l\") pod \"watcher-decision-engine-0\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.500983 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 30 12:19:14 crc kubenswrapper[4703]: I0130 12:19:14.540419 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-applier-0" Jan 30 12:19:15 crc kubenswrapper[4703]: I0130 12:19:15.100981 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b2c7636-4ea7-4d61-9219-873ab28aa505" path="/var/lib/kubelet/pods/7b2c7636-4ea7-4d61-9219-873ab28aa505/volumes" Jan 30 12:19:21 crc kubenswrapper[4703]: I0130 12:19:21.878555 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 30 12:19:21 crc kubenswrapper[4703]: I0130 12:19:21.879112 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 30 12:19:22 crc kubenswrapper[4703]: E0130 12:19:22.762601 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Jan 30 12:19:22 crc kubenswrapper[4703]: E0130 12:19:22.762962 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7b9n9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-mmqwb_openstack(273a2195-27a6-4229-b3c0-8c67d9fc4302): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:19:22 crc kubenswrapper[4703]: E0130 12:19:22.766599 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: 
\"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-mmqwb" podUID="273a2195-27a6-4229-b3c0-8c67d9fc4302" Jan 30 12:19:23 crc kubenswrapper[4703]: E0130 12:19:23.480740 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-mmqwb" podUID="273a2195-27a6-4229-b3c0-8c67d9fc4302" Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.130091 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.298468 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-config-data\") pod \"ba6892f1-1087-4650-b6fe-701fc48c94f7\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.298981 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-scripts\") pod \"ba6892f1-1087-4650-b6fe-701fc48c94f7\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.299043 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-fernet-keys\") pod \"ba6892f1-1087-4650-b6fe-701fc48c94f7\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.299070 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-combined-ca-bundle\") pod \"ba6892f1-1087-4650-b6fe-701fc48c94f7\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.299112 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-credential-keys\") pod \"ba6892f1-1087-4650-b6fe-701fc48c94f7\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.299188 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xnmt8\" (UniqueName: \"kubernetes.io/projected/ba6892f1-1087-4650-b6fe-701fc48c94f7-kube-api-access-xnmt8\") pod \"ba6892f1-1087-4650-b6fe-701fc48c94f7\" (UID: \"ba6892f1-1087-4650-b6fe-701fc48c94f7\") " Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.312667 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "ba6892f1-1087-4650-b6fe-701fc48c94f7" (UID: "ba6892f1-1087-4650-b6fe-701fc48c94f7"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.312755 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "ba6892f1-1087-4650-b6fe-701fc48c94f7" (UID: "ba6892f1-1087-4650-b6fe-701fc48c94f7"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.331941 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba6892f1-1087-4650-b6fe-701fc48c94f7-kube-api-access-xnmt8" (OuterVolumeSpecName: "kube-api-access-xnmt8") pod "ba6892f1-1087-4650-b6fe-701fc48c94f7" (UID: "ba6892f1-1087-4650-b6fe-701fc48c94f7"). InnerVolumeSpecName "kube-api-access-xnmt8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.332141 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-scripts" (OuterVolumeSpecName: "scripts") pod "ba6892f1-1087-4650-b6fe-701fc48c94f7" (UID: "ba6892f1-1087-4650-b6fe-701fc48c94f7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.347874 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ba6892f1-1087-4650-b6fe-701fc48c94f7" (UID: "ba6892f1-1087-4650-b6fe-701fc48c94f7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.382423 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-config-data" (OuterVolumeSpecName: "config-data") pod "ba6892f1-1087-4650-b6fe-701fc48c94f7" (UID: "ba6892f1-1087-4650-b6fe-701fc48c94f7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.404093 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.404222 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.404235 4703 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.404247 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.404262 4703 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ba6892f1-1087-4650-b6fe-701fc48c94f7-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.404299 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xnmt8\" (UniqueName: \"kubernetes.io/projected/ba6892f1-1087-4650-b6fe-701fc48c94f7-kube-api-access-xnmt8\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.504278 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-dswjd" Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.504464 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-dswjd" event={"ID":"ba6892f1-1087-4650-b6fe-701fc48c94f7","Type":"ContainerDied","Data":"cb799189d015b16b673cc317f47ab87a35228a617ae9afcd7dffdc4e91cafabe"} Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.504537 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cb799189d015b16b673cc317f47ab87a35228a617ae9afcd7dffdc4e91cafabe" Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.507701 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5f9958979d-8h859" event={"ID":"b888ea51-970d-4f4d-9e5c-f456ca173472","Type":"ContainerStarted","Data":"189e26c4eab30971a78885710777ee2e642a2ef52c043040fedd5dfcdddb7433"} Jan 30 12:19:25 crc kubenswrapper[4703]: I0130 12:19:25.512434 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77fb4cf9b8-pw692" event={"ID":"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a","Type":"ContainerStarted","Data":"e941d6ae1336ca46e0e38cde249a7ea833353c9f782e6dc6ec68c366f1e1c4f8"} Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.240512 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-dswjd"] Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.280941 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-dswjd"] Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.335194 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-8nnnq"] Jan 30 12:19:26 crc kubenswrapper[4703]: E0130 12:19:26.335822 4703 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="ba6892f1-1087-4650-b6fe-701fc48c94f7" containerName="keystone-bootstrap" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.335853 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba6892f1-1087-4650-b6fe-701fc48c94f7" containerName="keystone-bootstrap" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.336114 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba6892f1-1087-4650-b6fe-701fc48c94f7" containerName="keystone-bootstrap" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.337476 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.344660 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.345196 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-cz6x7" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.345350 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.345468 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.345577 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.350522 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8nnnq"] Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.431434 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-combined-ca-bundle\") pod \"keystone-bootstrap-8nnnq\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.431527 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-scripts\") pod \"keystone-bootstrap-8nnnq\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.431560 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ph2pq\" (UniqueName: \"kubernetes.io/projected/147c2570-9be4-4fb1-9789-f7bc204119db-kube-api-access-ph2pq\") pod \"keystone-bootstrap-8nnnq\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.431753 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-credential-keys\") pod \"keystone-bootstrap-8nnnq\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.432013 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-config-data\") pod 
\"keystone-bootstrap-8nnnq\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.432206 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-fernet-keys\") pod \"keystone-bootstrap-8nnnq\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.534943 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-combined-ca-bundle\") pod \"keystone-bootstrap-8nnnq\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.535140 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-scripts\") pod \"keystone-bootstrap-8nnnq\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.535182 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ph2pq\" (UniqueName: \"kubernetes.io/projected/147c2570-9be4-4fb1-9789-f7bc204119db-kube-api-access-ph2pq\") pod \"keystone-bootstrap-8nnnq\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.535211 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-credential-keys\") pod \"keystone-bootstrap-8nnnq\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.535290 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-config-data\") pod \"keystone-bootstrap-8nnnq\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.535348 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-fernet-keys\") pod \"keystone-bootstrap-8nnnq\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.542886 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-config-data\") pod \"keystone-bootstrap-8nnnq\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.543275 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-credential-keys\") pod \"keystone-bootstrap-8nnnq\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc 
kubenswrapper[4703]: I0130 12:19:26.545548 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-scripts\") pod \"keystone-bootstrap-8nnnq\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.546073 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-combined-ca-bundle\") pod \"keystone-bootstrap-8nnnq\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.548325 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-fernet-keys\") pod \"keystone-bootstrap-8nnnq\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.553600 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ph2pq\" (UniqueName: \"kubernetes.io/projected/147c2570-9be4-4fb1-9789-f7bc204119db-kube-api-access-ph2pq\") pod \"keystone-bootstrap-8nnnq\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:26 crc kubenswrapper[4703]: I0130 12:19:26.665285 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:27 crc kubenswrapper[4703]: I0130 12:19:27.103260 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba6892f1-1087-4650-b6fe-701fc48c94f7" path="/var/lib/kubelet/pods/ba6892f1-1087-4650-b6fe-701fc48c94f7/volumes" Jan 30 12:19:34 crc kubenswrapper[4703]: I0130 12:19:34.739691 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-galera-0" podUID="f0ae0323-f870-408b-b688-df1b4e3e8da6" containerName="galera" probeResult="failure" output="command timed out" Jan 30 12:19:34 crc kubenswrapper[4703]: I0130 12:19:34.740322 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="f0ae0323-f870-408b-b688-df1b4e3e8da6" containerName="galera" probeResult="failure" output="command timed out" Jan 30 12:19:38 crc kubenswrapper[4703]: I0130 12:19:38.712142 4703 scope.go:117] "RemoveContainer" containerID="881d63959d37b039010b03fac3aae6db1de00b580950b3c6aeaa73e0435a1deb" Jan 30 12:19:40 crc kubenswrapper[4703]: E0130 12:19:40.584456 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Jan 30 12:19:40 crc kubenswrapper[4703]: E0130 12:19:40.585284 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db 
upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xmlb5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-vkzk9_openstack(b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:19:40 crc kubenswrapper[4703]: E0130 12:19:40.587339 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-vkzk9" podUID="b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70" Jan 30 12:19:41 crc kubenswrapper[4703]: E0130 12:19:41.044532 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-vkzk9" podUID="b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70" Jan 30 12:19:41 crc kubenswrapper[4703]: E0130 12:19:41.203135 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Jan 30 12:19:41 crc kubenswrapper[4703]: E0130 12:19:41.203444 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nf6hd6h57chd8h5c6hd5h564h555h676h548h8bhc9h65h56h5b7h65ch56bh54bhb9h5fdh96h5d5h5cdh5bh75h659h697h55h54h68h648h598q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9gzxc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(572a81eb-68df-470a-9ca8-1febfc6167ad): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.310627 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.464561 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2331b62f-b8c2-4a35-b7d9-debf6073d98d-httpd-run\") pod \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.464751 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-config-data\") pod \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.465295 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2331b62f-b8c2-4a35-b7d9-debf6073d98d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "2331b62f-b8c2-4a35-b7d9-debf6073d98d" (UID: "2331b62f-b8c2-4a35-b7d9-debf6073d98d"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.465344 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.465531 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-scripts\") pod \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.466035 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-combined-ca-bundle\") pod \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.466313 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5j2q\" (UniqueName: \"kubernetes.io/projected/2331b62f-b8c2-4a35-b7d9-debf6073d98d-kube-api-access-g5j2q\") pod \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.466491 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2331b62f-b8c2-4a35-b7d9-debf6073d98d-logs\") pod \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.466594 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-public-tls-certs\") pod \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\" (UID: \"2331b62f-b8c2-4a35-b7d9-debf6073d98d\") " Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.467011 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2331b62f-b8c2-4a35-b7d9-debf6073d98d-logs" (OuterVolumeSpecName: "logs") pod "2331b62f-b8c2-4a35-b7d9-debf6073d98d" (UID: 
"2331b62f-b8c2-4a35-b7d9-debf6073d98d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.468214 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2331b62f-b8c2-4a35-b7d9-debf6073d98d-logs\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.468230 4703 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2331b62f-b8c2-4a35-b7d9-debf6073d98d-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.477058 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2331b62f-b8c2-4a35-b7d9-debf6073d98d-kube-api-access-g5j2q" (OuterVolumeSpecName: "kube-api-access-g5j2q") pod "2331b62f-b8c2-4a35-b7d9-debf6073d98d" (UID: "2331b62f-b8c2-4a35-b7d9-debf6073d98d"). InnerVolumeSpecName "kube-api-access-g5j2q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.477113 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-scripts" (OuterVolumeSpecName: "scripts") pod "2331b62f-b8c2-4a35-b7d9-debf6073d98d" (UID: "2331b62f-b8c2-4a35-b7d9-debf6073d98d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.477191 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "2331b62f-b8c2-4a35-b7d9-debf6073d98d" (UID: "2331b62f-b8c2-4a35-b7d9-debf6073d98d"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.538255 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2331b62f-b8c2-4a35-b7d9-debf6073d98d" (UID: "2331b62f-b8c2-4a35-b7d9-debf6073d98d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.571499 4703 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.571544 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.571556 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.571567 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5j2q\" (UniqueName: \"kubernetes.io/projected/2331b62f-b8c2-4a35-b7d9-debf6073d98d-kube-api-access-g5j2q\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.581399 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "2331b62f-b8c2-4a35-b7d9-debf6073d98d" (UID: "2331b62f-b8c2-4a35-b7d9-debf6073d98d"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.582948 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-config-data" (OuterVolumeSpecName: "config-data") pod "2331b62f-b8c2-4a35-b7d9-debf6073d98d" (UID: "2331b62f-b8c2-4a35-b7d9-debf6073d98d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.591683 4703 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.674700 4703 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.675279 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2331b62f-b8c2-4a35-b7d9-debf6073d98d-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:41 crc kubenswrapper[4703]: I0130 12:19:41.675295 4703 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.055318 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"2331b62f-b8c2-4a35-b7d9-debf6073d98d","Type":"ContainerDied","Data":"aca6b451d4b89578c280f22d268b86dc5b41f136db3868b7f69403e84a26460a"} Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.055474 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.131902 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.144296 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.158113 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 30 12:19:42 crc kubenswrapper[4703]: E0130 12:19:42.158852 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2331b62f-b8c2-4a35-b7d9-debf6073d98d" containerName="glance-log" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.158886 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2331b62f-b8c2-4a35-b7d9-debf6073d98d" containerName="glance-log" Jan 30 12:19:42 crc kubenswrapper[4703]: E0130 12:19:42.158912 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2331b62f-b8c2-4a35-b7d9-debf6073d98d" containerName="glance-httpd" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.158920 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2331b62f-b8c2-4a35-b7d9-debf6073d98d" containerName="glance-httpd" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.159189 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2331b62f-b8c2-4a35-b7d9-debf6073d98d" containerName="glance-httpd" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.159219 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2331b62f-b8c2-4a35-b7d9-debf6073d98d" containerName="glance-log" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.160772 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.166955 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.167475 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.171859 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.291530 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.291603 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.291691 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.291983 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.292107 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-logs\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.292258 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l65mq\" (UniqueName: \"kubernetes.io/projected/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-kube-api-access-l65mq\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.292497 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-scripts\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.292633 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-config-data\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.394440 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-logs\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.394573 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l65mq\" (UniqueName: \"kubernetes.io/projected/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-kube-api-access-l65mq\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.394613 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-scripts\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.394642 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-config-data\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.394724 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.394769 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.394799 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.394849 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.395171 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-logs\") 
pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.395239 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.395776 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.401462 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.401766 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-config-data\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.408827 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.415861 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-scripts\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.416471 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l65mq\" (UniqueName: \"kubernetes.io/projected/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-kube-api-access-l65mq\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.432332 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: I0130 12:19:42.490790 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 30 12:19:42 crc kubenswrapper[4703]: E0130 12:19:42.852551 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Jan 30 12:19:42 crc kubenswrapper[4703]: E0130 12:19:42.852776 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4xclk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-scrgs_openstack(1665113c-fcaa-4a13-9de2-552579864e44): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:19:42 crc kubenswrapper[4703]: E0130 12:19:42.854762 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-scrgs" podUID="1665113c-fcaa-4a13-9de2-552579864e44" Jan 30 12:19:43 crc kubenswrapper[4703]: I0130 12:19:43.028704 4703 scope.go:117] "RemoveContainer" 
containerID="2afe21d8bff3648d5aadae798fd8612bf1d4bd06128b70afbd57129731c29508" Jan 30 12:19:43 crc kubenswrapper[4703]: E0130 12:19:43.029827 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2afe21d8bff3648d5aadae798fd8612bf1d4bd06128b70afbd57129731c29508\": container with ID starting with 2afe21d8bff3648d5aadae798fd8612bf1d4bd06128b70afbd57129731c29508 not found: ID does not exist" containerID="2afe21d8bff3648d5aadae798fd8612bf1d4bd06128b70afbd57129731c29508" Jan 30 12:19:43 crc kubenswrapper[4703]: I0130 12:19:43.029897 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2afe21d8bff3648d5aadae798fd8612bf1d4bd06128b70afbd57129731c29508"} err="failed to get container status \"2afe21d8bff3648d5aadae798fd8612bf1d4bd06128b70afbd57129731c29508\": rpc error: code = NotFound desc = could not find container \"2afe21d8bff3648d5aadae798fd8612bf1d4bd06128b70afbd57129731c29508\": container with ID starting with 2afe21d8bff3648d5aadae798fd8612bf1d4bd06128b70afbd57129731c29508 not found: ID does not exist" Jan 30 12:19:43 crc kubenswrapper[4703]: I0130 12:19:43.029952 4703 scope.go:117] "RemoveContainer" containerID="881d63959d37b039010b03fac3aae6db1de00b580950b3c6aeaa73e0435a1deb" Jan 30 12:19:43 crc kubenswrapper[4703]: E0130 12:19:43.030618 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"881d63959d37b039010b03fac3aae6db1de00b580950b3c6aeaa73e0435a1deb\": container with ID starting with 881d63959d37b039010b03fac3aae6db1de00b580950b3c6aeaa73e0435a1deb not found: ID does not exist" containerID="881d63959d37b039010b03fac3aae6db1de00b580950b3c6aeaa73e0435a1deb" Jan 30 12:19:43 crc kubenswrapper[4703]: I0130 12:19:43.030712 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"881d63959d37b039010b03fac3aae6db1de00b580950b3c6aeaa73e0435a1deb"} err="failed to get container status \"881d63959d37b039010b03fac3aae6db1de00b580950b3c6aeaa73e0435a1deb\": rpc error: code = NotFound desc = could not find container \"881d63959d37b039010b03fac3aae6db1de00b580950b3c6aeaa73e0435a1deb\": container with ID starting with 881d63959d37b039010b03fac3aae6db1de00b580950b3c6aeaa73e0435a1deb not found: ID does not exist" Jan 30 12:19:43 crc kubenswrapper[4703]: I0130 12:19:43.030745 4703 scope.go:117] "RemoveContainer" containerID="e128946526eb2175075f1a0601ec019e014a1687950b6228c2e56f2911741391" Jan 30 12:19:43 crc kubenswrapper[4703]: E0130 12:19:43.074426 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-scrgs" podUID="1665113c-fcaa-4a13-9de2-552579864e44" Jan 30 12:19:43 crc kubenswrapper[4703]: I0130 12:19:43.105713 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2331b62f-b8c2-4a35-b7d9-debf6073d98d" path="/var/lib/kubelet/pods/2331b62f-b8c2-4a35-b7d9-debf6073d98d/volumes" Jan 30 12:19:43 crc kubenswrapper[4703]: I0130 12:19:43.180882 4703 scope.go:117] "RemoveContainer" containerID="bd745ea6c869be409bdaa402f9e724e868a803788204b03ca22ab690204d1db5" Jan 30 12:19:43 crc kubenswrapper[4703]: I0130 12:19:43.246758 4703 scope.go:117] "RemoveContainer" containerID="1c49598ce207ab5e2b91b7d3e1ff49344bfe38d72a42aa59b7368f91110d0b0d" Jan 
30 12:19:43 crc kubenswrapper[4703]: I0130 12:19:43.435023 4703 scope.go:117] "RemoveContainer" containerID="a5deef2cba61430c89a34c4bd9c1840d91254e070c680c12329c9ef8981e767b" Jan 30 12:19:43 crc kubenswrapper[4703]: I0130 12:19:43.797962 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-8nnnq"] Jan 30 12:19:43 crc kubenswrapper[4703]: I0130 12:19:43.813594 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 30 12:19:43 crc kubenswrapper[4703]: I0130 12:19:43.924197 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-applier-0"] Jan 30 12:19:43 crc kubenswrapper[4703]: I0130 12:19:43.962759 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Jan 30 12:19:44 crc kubenswrapper[4703]: I0130 12:19:44.068631 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 30 12:19:44 crc kubenswrapper[4703]: I0130 12:19:44.084791 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77fb4cf9b8-pw692" event={"ID":"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a","Type":"ContainerStarted","Data":"e2d501407f92428385c66f6cc0b6bae66e49be405294eebaa2d5ac368124717d"} Jan 30 12:19:44 crc kubenswrapper[4703]: I0130 12:19:44.095554 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-mmqwb" event={"ID":"273a2195-27a6-4229-b3c0-8c67d9fc4302","Type":"ContainerStarted","Data":"3a38b773280de96cef8739a5579de3e9421f01c64a9e3befcc4c24a982f1eead"} Jan 30 12:19:44 crc kubenswrapper[4703]: I0130 12:19:44.100644 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5bbf599fb5-drlk7" event={"ID":"2ac378ee-c96a-41cb-881b-0100056b27ad","Type":"ContainerStarted","Data":"8dc1baf060a43497da0bae36974ca6a96708b479d00c21dddb4892a9ac9ef5b5"} Jan 30 12:19:44 crc kubenswrapper[4703]: I0130 12:19:44.105431 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5f9958979d-8h859" event={"ID":"b888ea51-970d-4f4d-9e5c-f456ca173472","Type":"ContainerStarted","Data":"515f8dda2bc0f9be8dcdb24b41cdf2299f04b0afc161bd211413cc06590da029"} Jan 30 12:19:44 crc kubenswrapper[4703]: I0130 12:19:44.107700 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-594746f8cc-b7spb" event={"ID":"6ea69f39-6623-4a6e-adff-6014bb7c749b","Type":"ContainerStarted","Data":"b36c10043d0ee087fff4101d27a6cffd2fd7a36b90013c6d1dc568e780c494cc"} Jan 30 12:19:44 crc kubenswrapper[4703]: I0130 12:19:44.109227 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85698bccdf-jvv85" event={"ID":"f15d17d2-e8d6-49c8-a313-d815865271f0","Type":"ContainerStarted","Data":"c9d34830c6c9528c2db9a2b08e868b06714aeb34fe386462412ba5b2fc0fcb0b"} Jan 30 12:19:44 crc kubenswrapper[4703]: W0130 12:19:44.331621 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d5e864b_4ad4_447d_8b20_6d9999cea7bb.slice/crio-49f761ff573444b9b2457ee44bf53aa66e2e8d2f5bf577ffbdb31b4e4a4ae16c WatchSource:0}: Error finding container 49f761ff573444b9b2457ee44bf53aa66e2e8d2f5bf577ffbdb31b4e4a4ae16c: Status 404 returned error can't find the container with id 49f761ff573444b9b2457ee44bf53aa66e2e8d2f5bf577ffbdb31b4e4a4ae16c Jan 30 12:19:44 crc kubenswrapper[4703]: W0130 12:19:44.334630 4703 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod26df44bd_bd05_4ed3_b146_fa1111db982e.slice/crio-c6483ee91692fbc07589167785b8ec10c441b37a93cbade02ab25644abdfad91 WatchSource:0}: Error finding container c6483ee91692fbc07589167785b8ec10c441b37a93cbade02ab25644abdfad91: Status 404 returned error can't find the container with id c6483ee91692fbc07589167785b8ec10c441b37a93cbade02ab25644abdfad91 Jan 30 12:19:44 crc kubenswrapper[4703]: W0130 12:19:44.339365 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod78484d4c_53f4_4790_965f_2b8c22d7a9ce.slice/crio-1aa2e9a2b283f5de453d51cbccd7edf7c87960ddf507495c2f20fe26e4a1ff8f WatchSource:0}: Error finding container 1aa2e9a2b283f5de453d51cbccd7edf7c87960ddf507495c2f20fe26e4a1ff8f: Status 404 returned error can't find the container with id 1aa2e9a2b283f5de453d51cbccd7edf7c87960ddf507495c2f20fe26e4a1ff8f Jan 30 12:19:45 crc kubenswrapper[4703]: I0130 12:19:45.004136 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-mmqwb" podStartSLOduration=4.71444253 podStartE2EDuration="55.004091516s" podCreationTimestamp="2026-01-30 12:18:50 +0000 UTC" firstStartedPulling="2026-01-30 12:18:52.804345391 +0000 UTC m=+1368.582167045" lastFinishedPulling="2026-01-30 12:19:43.093994377 +0000 UTC m=+1418.871816031" observedRunningTime="2026-01-30 12:19:44.118234658 +0000 UTC m=+1419.896056322" watchObservedRunningTime="2026-01-30 12:19:45.004091516 +0000 UTC m=+1420.781913180" Jan 30 12:19:45 crc kubenswrapper[4703]: I0130 12:19:45.004974 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 30 12:19:45 crc kubenswrapper[4703]: I0130 12:19:45.169050 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3d5e864b-4ad4-447d-8b20-6d9999cea7bb","Type":"ContainerStarted","Data":"49f761ff573444b9b2457ee44bf53aa66e2e8d2f5bf577ffbdb31b4e4a4ae16c"} Jan 30 12:19:45 crc kubenswrapper[4703]: I0130 12:19:45.177565 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"78484d4c-53f4-4790-965f-2b8c22d7a9ce","Type":"ContainerStarted","Data":"1aa2e9a2b283f5de453d51cbccd7edf7c87960ddf507495c2f20fe26e4a1ff8f"} Jan 30 12:19:45 crc kubenswrapper[4703]: I0130 12:19:45.183230 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"41238b51-342b-4a59-b098-01f716f7a865","Type":"ContainerStarted","Data":"c276dd1cf7e3ed08da8320c83c94bd7e506f2ae962be81a3c7d0f78277d62ddd"} Jan 30 12:19:45 crc kubenswrapper[4703]: W0130 12:19:45.200330 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2e06db1b_bded_42ad_bd1e_8f7d76faf399.slice/crio-48aa4aedf0473662770adcc3cba881b2628ca96e26e186984c656a8fe6a6637d WatchSource:0}: Error finding container 48aa4aedf0473662770adcc3cba881b2628ca96e26e186984c656a8fe6a6637d: Status 404 returned error can't find the container with id 48aa4aedf0473662770adcc3cba881b2628ca96e26e186984c656a8fe6a6637d Jan 30 12:19:45 crc kubenswrapper[4703]: I0130 12:19:45.201366 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"26df44bd-bd05-4ed3-b146-fa1111db982e","Type":"ContainerStarted","Data":"c6483ee91692fbc07589167785b8ec10c441b37a93cbade02ab25644abdfad91"} Jan 30 12:19:45 crc kubenswrapper[4703]: 
I0130 12:19:45.216956 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8nnnq" event={"ID":"147c2570-9be4-4fb1-9789-f7bc204119db","Type":"ContainerStarted","Data":"a087cfabfbe230a4e382cb0d1c5d53987df3f81f6dfcb8f058109e5488568eef"} Jan 30 12:19:45 crc kubenswrapper[4703]: I0130 12:19:45.232611 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-95pfb" event={"ID":"bd8fdb9b-147d-4634-a312-719f1c62c7ff","Type":"ContainerDied","Data":"6dcbea808c1e6ab36bb68a56e173af69dedb9e73ac24c2601a2deb7572065251"} Jan 30 12:19:45 crc kubenswrapper[4703]: I0130 12:19:45.243287 4703 generic.go:334] "Generic (PLEG): container finished" podID="bd8fdb9b-147d-4634-a312-719f1c62c7ff" containerID="6dcbea808c1e6ab36bb68a56e173af69dedb9e73ac24c2601a2deb7572065251" exitCode=0 Jan 30 12:19:46 crc kubenswrapper[4703]: I0130 12:19:46.260176 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"41238b51-342b-4a59-b098-01f716f7a865","Type":"ContainerStarted","Data":"158f5fe6d1c7b33cb21d36308c61f42bbb8ebc6e9fd9564ec1ab6f6dbeeefdf3"} Jan 30 12:19:46 crc kubenswrapper[4703]: I0130 12:19:46.263509 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8nnnq" event={"ID":"147c2570-9be4-4fb1-9789-f7bc204119db","Type":"ContainerStarted","Data":"b8cecdf7f5f56768b007344414621396ae5457fcf96b89f2d2cbae7fa9ba8cdb"} Jan 30 12:19:46 crc kubenswrapper[4703]: I0130 12:19:46.267062 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5f9958979d-8h859" event={"ID":"b888ea51-970d-4f4d-9e5c-f456ca173472","Type":"ContainerStarted","Data":"44bd0eeab3c7b4371ebc41e5f7618026824b17804f7a8c0f1225abd278d6ca11"} Jan 30 12:19:46 crc kubenswrapper[4703]: I0130 12:19:46.274220 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-594746f8cc-b7spb" event={"ID":"6ea69f39-6623-4a6e-adff-6014bb7c749b","Type":"ContainerStarted","Data":"485b4e337950b032012f2a3276539bbaeec91bf21585acbd7dd0e778d654de83"} Jan 30 12:19:46 crc kubenswrapper[4703]: I0130 12:19:46.278702 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85698bccdf-jvv85" event={"ID":"f15d17d2-e8d6-49c8-a313-d815865271f0","Type":"ContainerStarted","Data":"3ba1d723bd951ffe42ed8058041695297f6d9eabdf3ecf2e535f33372bf4a446"} Jan 30 12:19:46 crc kubenswrapper[4703]: I0130 12:19:46.280168 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2e06db1b-bded-42ad-bd1e-8f7d76faf399","Type":"ContainerStarted","Data":"48aa4aedf0473662770adcc3cba881b2628ca96e26e186984c656a8fe6a6637d"} Jan 30 12:19:46 crc kubenswrapper[4703]: I0130 12:19:46.282805 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5bbf599fb5-drlk7" podUID="2ac378ee-c96a-41cb-881b-0100056b27ad" containerName="horizon-log" containerID="cri-o://8dc1baf060a43497da0bae36974ca6a96708b479d00c21dddb4892a9ac9ef5b5" gracePeriod=30 Jan 30 12:19:46 crc kubenswrapper[4703]: I0130 12:19:46.283247 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5bbf599fb5-drlk7" event={"ID":"2ac378ee-c96a-41cb-881b-0100056b27ad","Type":"ContainerStarted","Data":"6292b0000982418134967dc906695bb7c653583af79e35e4f551aa6955384f96"} Jan 30 12:19:46 crc kubenswrapper[4703]: I0130 12:19:46.283375 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5bbf599fb5-drlk7" 
podUID="2ac378ee-c96a-41cb-881b-0100056b27ad" containerName="horizon" containerID="cri-o://6292b0000982418134967dc906695bb7c653583af79e35e4f551aa6955384f96" gracePeriod=30 Jan 30 12:19:46 crc kubenswrapper[4703]: I0130 12:19:46.312366 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5f9958979d-8h859" podStartSLOduration=29.195655282 podStartE2EDuration="47.312337719s" podCreationTimestamp="2026-01-30 12:18:59 +0000 UTC" firstStartedPulling="2026-01-30 12:19:25.057025875 +0000 UTC m=+1400.834847529" lastFinishedPulling="2026-01-30 12:19:43.173708312 +0000 UTC m=+1418.951529966" observedRunningTime="2026-01-30 12:19:46.29787331 +0000 UTC m=+1422.075694984" watchObservedRunningTime="2026-01-30 12:19:46.312337719 +0000 UTC m=+1422.090159373" Jan 30 12:19:46 crc kubenswrapper[4703]: I0130 12:19:46.369214 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5bbf599fb5-drlk7" podStartSLOduration=6.8259622669999995 podStartE2EDuration="56.369107645s" podCreationTimestamp="2026-01-30 12:18:50 +0000 UTC" firstStartedPulling="2026-01-30 12:18:53.550630553 +0000 UTC m=+1369.328452197" lastFinishedPulling="2026-01-30 12:19:43.093775921 +0000 UTC m=+1418.871597575" observedRunningTime="2026-01-30 12:19:46.329900602 +0000 UTC m=+1422.107722266" watchObservedRunningTime="2026-01-30 12:19:46.369107645 +0000 UTC m=+1422.146929289" Jan 30 12:19:47 crc kubenswrapper[4703]: I0130 12:19:47.297905 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-594746f8cc-b7spb" podUID="6ea69f39-6623-4a6e-adff-6014bb7c749b" containerName="horizon-log" containerID="cri-o://b36c10043d0ee087fff4101d27a6cffd2fd7a36b90013c6d1dc568e780c494cc" gracePeriod=30 Jan 30 12:19:47 crc kubenswrapper[4703]: I0130 12:19:47.298280 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-85698bccdf-jvv85" podUID="f15d17d2-e8d6-49c8-a313-d815865271f0" containerName="horizon-log" containerID="cri-o://c9d34830c6c9528c2db9a2b08e868b06714aeb34fe386462412ba5b2fc0fcb0b" gracePeriod=30 Jan 30 12:19:47 crc kubenswrapper[4703]: I0130 12:19:47.300378 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-594746f8cc-b7spb" podUID="6ea69f39-6623-4a6e-adff-6014bb7c749b" containerName="horizon" containerID="cri-o://485b4e337950b032012f2a3276539bbaeec91bf21585acbd7dd0e778d654de83" gracePeriod=30 Jan 30 12:19:47 crc kubenswrapper[4703]: I0130 12:19:47.300504 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-85698bccdf-jvv85" podUID="f15d17d2-e8d6-49c8-a313-d815865271f0" containerName="horizon" containerID="cri-o://3ba1d723bd951ffe42ed8058041695297f6d9eabdf3ecf2e535f33372bf4a446" gracePeriod=30 Jan 30 12:19:47 crc kubenswrapper[4703]: I0130 12:19:47.336843 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-8nnnq" podStartSLOduration=21.336808357 podStartE2EDuration="21.336808357s" podCreationTimestamp="2026-01-30 12:19:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:19:47.335645816 +0000 UTC m=+1423.113467470" watchObservedRunningTime="2026-01-30 12:19:47.336808357 +0000 UTC m=+1423.114630011" Jan 30 12:19:47 crc kubenswrapper[4703]: I0130 12:19:47.363147 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/horizon-594746f8cc-b7spb" podStartSLOduration=8.420087867 podStartE2EDuration="58.363093905s" podCreationTimestamp="2026-01-30 12:18:49 +0000 UTC" firstStartedPulling="2026-01-30 12:18:52.902816171 +0000 UTC m=+1368.680637825" lastFinishedPulling="2026-01-30 12:19:42.845822209 +0000 UTC m=+1418.623643863" observedRunningTime="2026-01-30 12:19:47.361775979 +0000 UTC m=+1423.139597643" watchObservedRunningTime="2026-01-30 12:19:47.363093905 +0000 UTC m=+1423.140915559" Jan 30 12:19:47 crc kubenswrapper[4703]: I0130 12:19:47.392527 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-85698bccdf-jvv85" podStartSLOduration=7.023617707 podStartE2EDuration="54.392500536s" podCreationTimestamp="2026-01-30 12:18:53 +0000 UTC" firstStartedPulling="2026-01-30 12:18:55.679551731 +0000 UTC m=+1371.457373385" lastFinishedPulling="2026-01-30 12:19:43.04843456 +0000 UTC m=+1418.826256214" observedRunningTime="2026-01-30 12:19:47.384947642 +0000 UTC m=+1423.162769316" watchObservedRunningTime="2026-01-30 12:19:47.392500536 +0000 UTC m=+1423.170322190" Jan 30 12:19:48 crc kubenswrapper[4703]: I0130 12:19:48.148960 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-95pfb" Jan 30 12:19:48 crc kubenswrapper[4703]: I0130 12:19:48.315320 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rcq9s\" (UniqueName: \"kubernetes.io/projected/bd8fdb9b-147d-4634-a312-719f1c62c7ff-kube-api-access-rcq9s\") pod \"bd8fdb9b-147d-4634-a312-719f1c62c7ff\" (UID: \"bd8fdb9b-147d-4634-a312-719f1c62c7ff\") " Jan 30 12:19:48 crc kubenswrapper[4703]: I0130 12:19:48.315663 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd8fdb9b-147d-4634-a312-719f1c62c7ff-combined-ca-bundle\") pod \"bd8fdb9b-147d-4634-a312-719f1c62c7ff\" (UID: \"bd8fdb9b-147d-4634-a312-719f1c62c7ff\") " Jan 30 12:19:48 crc kubenswrapper[4703]: I0130 12:19:48.318303 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bd8fdb9b-147d-4634-a312-719f1c62c7ff-config\") pod \"bd8fdb9b-147d-4634-a312-719f1c62c7ff\" (UID: \"bd8fdb9b-147d-4634-a312-719f1c62c7ff\") " Jan 30 12:19:48 crc kubenswrapper[4703]: I0130 12:19:48.328445 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd8fdb9b-147d-4634-a312-719f1c62c7ff-kube-api-access-rcq9s" (OuterVolumeSpecName: "kube-api-access-rcq9s") pod "bd8fdb9b-147d-4634-a312-719f1c62c7ff" (UID: "bd8fdb9b-147d-4634-a312-719f1c62c7ff"). InnerVolumeSpecName "kube-api-access-rcq9s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:19:48 crc kubenswrapper[4703]: I0130 12:19:48.332588 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"572a81eb-68df-470a-9ca8-1febfc6167ad","Type":"ContainerStarted","Data":"37a63d0f9e4cd08d66ff7b2a23c08291f9552b813721b66cae5547fe2c7dfabb"} Jan 30 12:19:48 crc kubenswrapper[4703]: I0130 12:19:48.344462 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2e06db1b-bded-42ad-bd1e-8f7d76faf399","Type":"ContainerStarted","Data":"0c096790719dd5337c355ae48bef2725394ace49dc8b6390b01c1f5ace1d1355"} Jan 30 12:19:48 crc kubenswrapper[4703]: I0130 12:19:48.358487 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-95pfb" event={"ID":"bd8fdb9b-147d-4634-a312-719f1c62c7ff","Type":"ContainerDied","Data":"03c6baab368e9f1480617e8edba15643f71e8f35ae9be432e140a7f96788135e"} Jan 30 12:19:48 crc kubenswrapper[4703]: I0130 12:19:48.358566 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03c6baab368e9f1480617e8edba15643f71e8f35ae9be432e140a7f96788135e" Jan 30 12:19:48 crc kubenswrapper[4703]: I0130 12:19:48.358702 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-95pfb" Jan 30 12:19:48 crc kubenswrapper[4703]: I0130 12:19:48.368580 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd8fdb9b-147d-4634-a312-719f1c62c7ff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bd8fdb9b-147d-4634-a312-719f1c62c7ff" (UID: "bd8fdb9b-147d-4634-a312-719f1c62c7ff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:48 crc kubenswrapper[4703]: I0130 12:19:48.376846 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77fb4cf9b8-pw692" event={"ID":"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a","Type":"ContainerStarted","Data":"a29c27b7fb233396218ace770f8222710c040473791f84a186e8849760d220e4"} Jan 30 12:19:48 crc kubenswrapper[4703]: I0130 12:19:48.389186 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3d5e864b-4ad4-447d-8b20-6d9999cea7bb","Type":"ContainerStarted","Data":"5e2a67c6b92263c56ecd8e9554ff8f505f0e8f53d8344a3d84e6d9aa54ce988e"} Jan 30 12:19:48 crc kubenswrapper[4703]: I0130 12:19:48.393197 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd8fdb9b-147d-4634-a312-719f1c62c7ff-config" (OuterVolumeSpecName: "config") pod "bd8fdb9b-147d-4634-a312-719f1c62c7ff" (UID: "bd8fdb9b-147d-4634-a312-719f1c62c7ff"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:48 crc kubenswrapper[4703]: I0130 12:19:48.420854 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/bd8fdb9b-147d-4634-a312-719f1c62c7ff-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:48 crc kubenswrapper[4703]: I0130 12:19:48.420898 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rcq9s\" (UniqueName: \"kubernetes.io/projected/bd8fdb9b-147d-4634-a312-719f1c62c7ff-kube-api-access-rcq9s\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:48 crc kubenswrapper[4703]: I0130 12:19:48.420910 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd8fdb9b-147d-4634-a312-719f1c62c7ff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:48 crc kubenswrapper[4703]: I0130 12:19:48.428289 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-77fb4cf9b8-pw692" podStartSLOduration=30.313694088 podStartE2EDuration="48.428254638s" podCreationTimestamp="2026-01-30 12:19:00 +0000 UTC" firstStartedPulling="2026-01-30 12:19:25.059114971 +0000 UTC m=+1400.836936625" lastFinishedPulling="2026-01-30 12:19:43.173675521 +0000 UTC m=+1418.951497175" observedRunningTime="2026-01-30 12:19:48.402799532 +0000 UTC m=+1424.180621196" watchObservedRunningTime="2026-01-30 12:19:48.428254638 +0000 UTC m=+1424.206076292" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.445271 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-xmmwk"] Jan 30 12:19:49 crc kubenswrapper[4703]: E0130 12:19:49.446150 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd8fdb9b-147d-4634-a312-719f1c62c7ff" containerName="neutron-db-sync" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.446172 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd8fdb9b-147d-4634-a312-719f1c62c7ff" containerName="neutron-db-sync" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.446545 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd8fdb9b-147d-4634-a312-719f1c62c7ff" containerName="neutron-db-sync" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.459184 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.476686 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-xmmwk"] Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.537744 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-85fd6c789b-jdlgt"] Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.540649 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.553074 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.553180 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.553252 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.553566 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-drksr" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.558266 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-xmmwk\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.558461 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-config\") pod \"dnsmasq-dns-55f844cf75-xmmwk\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.558510 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-xmmwk\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.558552 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-dns-svc\") pod \"dnsmasq-dns-55f844cf75-xmmwk\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.558652 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fv4cj\" (UniqueName: \"kubernetes.io/projected/c24494ec-321b-4936-b75b-860fd4bce09b-kube-api-access-fv4cj\") pod \"dnsmasq-dns-55f844cf75-xmmwk\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.558758 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-xmmwk\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.579408 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-85fd6c789b-jdlgt"] Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.661454 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fv4cj\" (UniqueName: 
\"kubernetes.io/projected/c24494ec-321b-4936-b75b-860fd4bce09b-kube-api-access-fv4cj\") pod \"dnsmasq-dns-55f844cf75-xmmwk\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.663292 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-xmmwk\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.663435 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-ovndb-tls-certs\") pod \"neutron-85fd6c789b-jdlgt\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.663516 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-combined-ca-bundle\") pod \"neutron-85fd6c789b-jdlgt\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.663545 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-config\") pod \"neutron-85fd6c789b-jdlgt\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.663589 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-xmmwk\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.663629 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-httpd-config\") pod \"neutron-85fd6c789b-jdlgt\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.663695 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbpzz\" (UniqueName: \"kubernetes.io/projected/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-kube-api-access-nbpzz\") pod \"neutron-85fd6c789b-jdlgt\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.663737 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-config\") pod \"dnsmasq-dns-55f844cf75-xmmwk\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.663768 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-xmmwk\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.663808 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-dns-svc\") pod \"dnsmasq-dns-55f844cf75-xmmwk\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.665315 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-dns-svc\") pod \"dnsmasq-dns-55f844cf75-xmmwk\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.665445 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-ovsdbserver-nb\") pod \"dnsmasq-dns-55f844cf75-xmmwk\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.668403 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-dns-swift-storage-0\") pod \"dnsmasq-dns-55f844cf75-xmmwk\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.670023 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-config\") pod \"dnsmasq-dns-55f844cf75-xmmwk\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.670889 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-ovsdbserver-sb\") pod \"dnsmasq-dns-55f844cf75-xmmwk\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.731200 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fv4cj\" (UniqueName: \"kubernetes.io/projected/c24494ec-321b-4936-b75b-860fd4bce09b-kube-api-access-fv4cj\") pod \"dnsmasq-dns-55f844cf75-xmmwk\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.799380 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-ovndb-tls-certs\") pod \"neutron-85fd6c789b-jdlgt\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.799459 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-combined-ca-bundle\") pod \"neutron-85fd6c789b-jdlgt\" (UID: 
\"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.799490 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-config\") pod \"neutron-85fd6c789b-jdlgt\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.799561 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-httpd-config\") pod \"neutron-85fd6c789b-jdlgt\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.799687 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbpzz\" (UniqueName: \"kubernetes.io/projected/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-kube-api-access-nbpzz\") pod \"neutron-85fd6c789b-jdlgt\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.810795 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.830430 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-ovndb-tls-certs\") pod \"neutron-85fd6c789b-jdlgt\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.830764 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-config\") pod \"neutron-85fd6c789b-jdlgt\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.835279 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-httpd-config\") pod \"neutron-85fd6c789b-jdlgt\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:19:49 crc kubenswrapper[4703]: I0130 12:19:49.845056 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-combined-ca-bundle\") pod \"neutron-85fd6c789b-jdlgt\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:19:50 crc kubenswrapper[4703]: I0130 12:19:50.046655 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbpzz\" (UniqueName: \"kubernetes.io/projected/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-kube-api-access-nbpzz\") pod \"neutron-85fd6c789b-jdlgt\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:19:50 crc kubenswrapper[4703]: I0130 12:19:50.181974 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:19:50 crc kubenswrapper[4703]: I0130 12:19:50.507131 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:50 crc kubenswrapper[4703]: I0130 12:19:50.508137 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:19:50 crc kubenswrapper[4703]: I0130 12:19:50.698966 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:19:50 crc kubenswrapper[4703]: I0130 12:19:50.881440 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:50 crc kubenswrapper[4703]: I0130 12:19:50.881517 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:19:51 crc kubenswrapper[4703]: I0130 12:19:51.026636 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-xmmwk"] Jan 30 12:19:51 crc kubenswrapper[4703]: I0130 12:19:51.297064 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-85fd6c789b-jdlgt"] Jan 30 12:19:51 crc kubenswrapper[4703]: W0130 12:19:51.311234 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac2f050c_f8d0_4ae5_9205_17e00f283e4b.slice/crio-dd55e773356b424c5681116df4ad8d66a6e372f1c854aa3546d3b5d9302523bb WatchSource:0}: Error finding container dd55e773356b424c5681116df4ad8d66a6e372f1c854aa3546d3b5d9302523bb: Status 404 returned error can't find the container with id dd55e773356b424c5681116df4ad8d66a6e372f1c854aa3546d3b5d9302523bb Jan 30 12:19:51 crc kubenswrapper[4703]: I0130 12:19:51.590150 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"26df44bd-bd05-4ed3-b146-fa1111db982e","Type":"ContainerStarted","Data":"ddfffd2731b492f8a028180da68fad74e9d83ce5f5552968d2ed9fc2d5b71b5d"} Jan 30 12:19:51 crc kubenswrapper[4703]: I0130 12:19:51.596342 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85fd6c789b-jdlgt" event={"ID":"ac2f050c-f8d0-4ae5-9205-17e00f283e4b","Type":"ContainerStarted","Data":"dd55e773356b424c5681116df4ad8d66a6e372f1c854aa3546d3b5d9302523bb"} Jan 30 12:19:51 crc kubenswrapper[4703]: I0130 12:19:51.612456 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" event={"ID":"c24494ec-321b-4936-b75b-860fd4bce09b","Type":"ContainerStarted","Data":"46d79ed257ab9a1cff8c326e30033fab7480990ef7dd62be1e3243df814ec3a0"} Jan 30 12:19:51 crc kubenswrapper[4703]: I0130 12:19:51.614698 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-applier-0" event={"ID":"78484d4c-53f4-4790-965f-2b8c22d7a9ce","Type":"ContainerStarted","Data":"bf3e736558987b037d01946bb28fdb75e3b89f7716146a1bcafc32267c76cd2f"} Jan 30 12:19:51 crc kubenswrapper[4703]: I0130 12:19:51.639478 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2e06db1b-bded-42ad-bd1e-8f7d76faf399","Type":"ContainerStarted","Data":"554ef222568526b96f371ff24cf6656b26fd015e644d5c749f496cef9d68ab04"} Jan 30 12:19:51 crc kubenswrapper[4703]: I0130 12:19:51.643550 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" 
podStartSLOduration=31.789145018 podStartE2EDuration="37.643521466s" podCreationTimestamp="2026-01-30 12:19:14 +0000 UTC" firstStartedPulling="2026-01-30 12:19:44.339667816 +0000 UTC m=+1420.117489470" lastFinishedPulling="2026-01-30 12:19:50.194044264 +0000 UTC m=+1425.971865918" observedRunningTime="2026-01-30 12:19:51.617921471 +0000 UTC m=+1427.395743125" watchObservedRunningTime="2026-01-30 12:19:51.643521466 +0000 UTC m=+1427.421343120" Jan 30 12:19:51 crc kubenswrapper[4703]: I0130 12:19:51.674548 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-applier-0" podStartSLOduration=31.707125575 podStartE2EDuration="37.674512121s" podCreationTimestamp="2026-01-30 12:19:14 +0000 UTC" firstStartedPulling="2026-01-30 12:19:44.347694312 +0000 UTC m=+1420.125515966" lastFinishedPulling="2026-01-30 12:19:50.315080848 +0000 UTC m=+1426.092902512" observedRunningTime="2026-01-30 12:19:51.652230132 +0000 UTC m=+1427.430051786" watchObservedRunningTime="2026-01-30 12:19:51.674512121 +0000 UTC m=+1427.452333775" Jan 30 12:19:51 crc kubenswrapper[4703]: I0130 12:19:51.698922 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"41238b51-342b-4a59-b098-01f716f7a865","Type":"ContainerStarted","Data":"b1a6187e2c22c0e626d1088348b15d2fe1e5b8be71d34da8ab0aca0cd5dc69d9"} Jan 30 12:19:51 crc kubenswrapper[4703]: I0130 12:19:51.699492 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Jan 30 12:19:51 crc kubenswrapper[4703]: I0130 12:19:51.744154 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=38.744102759 podStartE2EDuration="38.744102759s" podCreationTimestamp="2026-01-30 12:19:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:19:51.694046688 +0000 UTC m=+1427.471868362" watchObservedRunningTime="2026-01-30 12:19:51.744102759 +0000 UTC m=+1427.521924413" Jan 30 12:19:51 crc kubenswrapper[4703]: I0130 12:19:51.744326 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=37.744320134 podStartE2EDuration="37.744320134s" podCreationTimestamp="2026-01-30 12:19:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:19:51.743726779 +0000 UTC m=+1427.521548453" watchObservedRunningTime="2026-01-30 12:19:51.744320134 +0000 UTC m=+1427.522141798" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.254888 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.534483 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-676db989fc-4rthl"] Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.542766 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.544508 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-676db989fc-4rthl"] Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.548562 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.549052 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.669375 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-httpd-config\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.669945 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-internal-tls-certs\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.669990 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5rwf\" (UniqueName: \"kubernetes.io/projected/62d4e769-8427-473f-8184-be89133bb4bc-kube-api-access-c5rwf\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.670284 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-config\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.670321 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-public-tls-certs\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.670358 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-combined-ca-bundle\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.670410 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-ovndb-tls-certs\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.719821 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85fd6c789b-jdlgt" 
event={"ID":"ac2f050c-f8d0-4ae5-9205-17e00f283e4b","Type":"ContainerStarted","Data":"7cac79a2b4c4c93950b47a0d1e832c934582542d6673b18d7481bb7237de22c3"} Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.737986 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3d5e864b-4ad4-447d-8b20-6d9999cea7bb","Type":"ContainerStarted","Data":"598a787f498683049fc201ad26000b02a7f839024e16b91d30c5f557e8ea1365"} Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.754592 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" event={"ID":"c24494ec-321b-4936-b75b-860fd4bce09b","Type":"ContainerStarted","Data":"7c113a00f5149b9744478a061a1a656abfd3e4722192c94585220e40203b9b6c"} Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.782796 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5rwf\" (UniqueName: \"kubernetes.io/projected/62d4e769-8427-473f-8184-be89133bb4bc-kube-api-access-c5rwf\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.783059 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-config\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.783081 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-public-tls-certs\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.783105 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-combined-ca-bundle\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.783195 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-ovndb-tls-certs\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.783539 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-httpd-config\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.783586 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-internal-tls-certs\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:52 crc kubenswrapper[4703]: I0130 12:19:52.783908 4703 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=10.783881268 podStartE2EDuration="10.783881268s" podCreationTimestamp="2026-01-30 12:19:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:19:52.765913162 +0000 UTC m=+1428.543734816" watchObservedRunningTime="2026-01-30 12:19:52.783881268 +0000 UTC m=+1428.561702922" Jan 30 12:19:53 crc kubenswrapper[4703]: I0130 12:19:53.074261 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5rwf\" (UniqueName: \"kubernetes.io/projected/62d4e769-8427-473f-8184-be89133bb4bc-kube-api-access-c5rwf\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:53 crc kubenswrapper[4703]: I0130 12:19:53.081869 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-internal-tls-certs\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:53 crc kubenswrapper[4703]: I0130 12:19:53.082248 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-ovndb-tls-certs\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:53 crc kubenswrapper[4703]: I0130 12:19:53.085158 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-config\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:53 crc kubenswrapper[4703]: I0130 12:19:53.092757 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-httpd-config\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:53 crc kubenswrapper[4703]: I0130 12:19:53.095294 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-public-tls-certs\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:53 crc kubenswrapper[4703]: I0130 12:19:53.108501 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-combined-ca-bundle\") pod \"neutron-676db989fc-4rthl\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:53 crc kubenswrapper[4703]: I0130 12:19:53.191188 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:53 crc kubenswrapper[4703]: I0130 12:19:53.611023 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 30 12:19:53 crc kubenswrapper[4703]: I0130 12:19:53.612044 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 30 12:19:53 crc kubenswrapper[4703]: I0130 12:19:53.734608 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 30 12:19:53 crc kubenswrapper[4703]: I0130 12:19:53.761052 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 30 12:19:53 crc kubenswrapper[4703]: I0130 12:19:53.819846 4703 generic.go:334] "Generic (PLEG): container finished" podID="c24494ec-321b-4936-b75b-860fd4bce09b" containerID="7c113a00f5149b9744478a061a1a656abfd3e4722192c94585220e40203b9b6c" exitCode=0 Jan 30 12:19:53 crc kubenswrapper[4703]: I0130 12:19:53.820228 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" event={"ID":"c24494ec-321b-4936-b75b-860fd4bce09b","Type":"ContainerDied","Data":"7c113a00f5149b9744478a061a1a656abfd3e4722192c94585220e40203b9b6c"} Jan 30 12:19:53 crc kubenswrapper[4703]: I0130 12:19:53.822077 4703 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 30 12:19:53 crc kubenswrapper[4703]: I0130 12:19:53.823531 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 30 12:19:53 crc kubenswrapper[4703]: I0130 12:19:53.823563 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 30 12:19:54 crc kubenswrapper[4703]: I0130 12:19:54.199752 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:19:54 crc kubenswrapper[4703]: I0130 12:19:54.233495 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-676db989fc-4rthl"] Jan 30 12:19:54 crc kubenswrapper[4703]: W0130 12:19:54.261066 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod62d4e769_8427_473f_8184_be89133bb4bc.slice/crio-eb09ba8e596f8e819a7d62c2369dfaa1aaee80e315637fd5a5b3d58a38602ff3 WatchSource:0}: Error finding container eb09ba8e596f8e819a7d62c2369dfaa1aaee80e315637fd5a5b3d58a38602ff3: Status 404 returned error can't find the container with id eb09ba8e596f8e819a7d62c2369dfaa1aaee80e315637fd5a5b3d58a38602ff3 Jan 30 12:19:54 crc kubenswrapper[4703]: I0130 12:19:54.426600 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0" Jan 30 12:19:54 crc kubenswrapper[4703]: I0130 12:19:54.426668 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0" Jan 30 12:19:54 crc kubenswrapper[4703]: I0130 12:19:54.503668 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 30 12:19:54 crc kubenswrapper[4703]: I0130 12:19:54.542675 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-applier-0" Jan 30 12:19:54 crc kubenswrapper[4703]: I0130 12:19:54.542742 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openstack/watcher-applier-0" Jan 30 12:19:54 crc kubenswrapper[4703]: I0130 12:19:54.586640 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Jan 30 12:19:54 crc kubenswrapper[4703]: I0130 12:19:54.710375 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-applier-0" Jan 30 12:19:54 crc kubenswrapper[4703]: I0130 12:19:54.866883 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85fd6c789b-jdlgt" event={"ID":"ac2f050c-f8d0-4ae5-9205-17e00f283e4b","Type":"ContainerStarted","Data":"f78b0f9d84606f39b963ac7f2e9ea287421ab9ebf7d40a6a73dbb116f96c8667"} Jan 30 12:19:54 crc kubenswrapper[4703]: I0130 12:19:54.871551 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-676db989fc-4rthl" event={"ID":"62d4e769-8427-473f-8184-be89133bb4bc","Type":"ContainerStarted","Data":"eb09ba8e596f8e819a7d62c2369dfaa1aaee80e315637fd5a5b3d58a38602ff3"} Jan 30 12:19:54 crc kubenswrapper[4703]: I0130 12:19:54.872401 4703 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 30 12:19:54 crc kubenswrapper[4703]: I0130 12:19:54.873599 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Jan 30 12:19:54 crc kubenswrapper[4703]: I0130 12:19:54.932254 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Jan 30 12:19:54 crc kubenswrapper[4703]: I0130 12:19:54.953187 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-applier-0" Jan 30 12:19:55 crc kubenswrapper[4703]: I0130 12:19:55.468408 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/watcher-api-0" podUID="41238b51-342b-4a59-b098-01f716f7a865" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.162:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 30 12:19:55 crc kubenswrapper[4703]: I0130 12:19:55.864434 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Jan 30 12:19:55 crc kubenswrapper[4703]: I0130 12:19:55.903272 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" event={"ID":"c24494ec-321b-4936-b75b-860fd4bce09b","Type":"ContainerStarted","Data":"e2ac3ce601974b00bff7b2f070d085019bf50b31648c83b4f12c49345ad136b4"} Jan 30 12:19:55 crc kubenswrapper[4703]: I0130 12:19:55.903856 4703 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 30 12:19:56 crc kubenswrapper[4703]: I0130 12:19:56.920184 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-676db989fc-4rthl" event={"ID":"62d4e769-8427-473f-8184-be89133bb4bc","Type":"ContainerStarted","Data":"4c41d93f84f1c54e2ae7edb332eb5ae9e18ea295b155d77b1e1f1f76559bfaf4"} Jan 30 12:19:56 crc kubenswrapper[4703]: I0130 12:19:56.923069 4703 generic.go:334] "Generic (PLEG): container finished" podID="147c2570-9be4-4fb1-9789-f7bc204119db" containerID="b8cecdf7f5f56768b007344414621396ae5457fcf96b89f2d2cbae7fa9ba8cdb" exitCode=0 Jan 30 12:19:56 crc kubenswrapper[4703]: I0130 12:19:56.923159 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8nnnq" event={"ID":"147c2570-9be4-4fb1-9789-f7bc204119db","Type":"ContainerDied","Data":"b8cecdf7f5f56768b007344414621396ae5457fcf96b89f2d2cbae7fa9ba8cdb"} Jan 30 
12:19:56 crc kubenswrapper[4703]: I0130 12:19:56.923982 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:19:56 crc kubenswrapper[4703]: I0130 12:19:56.924301 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:19:56 crc kubenswrapper[4703]: I0130 12:19:56.965473 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-85fd6c789b-jdlgt" podStartSLOduration=7.965441811 podStartE2EDuration="7.965441811s" podCreationTimestamp="2026-01-30 12:19:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:19:56.960161433 +0000 UTC m=+1432.737983087" watchObservedRunningTime="2026-01-30 12:19:56.965441811 +0000 UTC m=+1432.743263465" Jan 30 12:19:56 crc kubenswrapper[4703]: I0130 12:19:56.999544 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" podStartSLOduration=7.999514765 podStartE2EDuration="7.999514765s" podCreationTimestamp="2026-01-30 12:19:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:19:56.986549229 +0000 UTC m=+1432.764370883" watchObservedRunningTime="2026-01-30 12:19:56.999514765 +0000 UTC m=+1432.777336419" Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.546169 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.726056 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ph2pq\" (UniqueName: \"kubernetes.io/projected/147c2570-9be4-4fb1-9789-f7bc204119db-kube-api-access-ph2pq\") pod \"147c2570-9be4-4fb1-9789-f7bc204119db\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.726192 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-scripts\") pod \"147c2570-9be4-4fb1-9789-f7bc204119db\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.726260 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-config-data\") pod \"147c2570-9be4-4fb1-9789-f7bc204119db\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.726316 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-fernet-keys\") pod \"147c2570-9be4-4fb1-9789-f7bc204119db\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.726572 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-credential-keys\") pod \"147c2570-9be4-4fb1-9789-f7bc204119db\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.726624 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-combined-ca-bundle\") pod \"147c2570-9be4-4fb1-9789-f7bc204119db\" (UID: \"147c2570-9be4-4fb1-9789-f7bc204119db\") " Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.735383 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-scripts" (OuterVolumeSpecName: "scripts") pod "147c2570-9be4-4fb1-9789-f7bc204119db" (UID: "147c2570-9be4-4fb1-9789-f7bc204119db"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.736373 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/147c2570-9be4-4fb1-9789-f7bc204119db-kube-api-access-ph2pq" (OuterVolumeSpecName: "kube-api-access-ph2pq") pod "147c2570-9be4-4fb1-9789-f7bc204119db" (UID: "147c2570-9be4-4fb1-9789-f7bc204119db"). InnerVolumeSpecName "kube-api-access-ph2pq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.738701 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "147c2570-9be4-4fb1-9789-f7bc204119db" (UID: "147c2570-9be4-4fb1-9789-f7bc204119db"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.763511 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "147c2570-9be4-4fb1-9789-f7bc204119db" (UID: "147c2570-9be4-4fb1-9789-f7bc204119db"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.797325 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "147c2570-9be4-4fb1-9789-f7bc204119db" (UID: "147c2570-9be4-4fb1-9789-f7bc204119db"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.808037 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-config-data" (OuterVolumeSpecName: "config-data") pod "147c2570-9be4-4fb1-9789-f7bc204119db" (UID: "147c2570-9be4-4fb1-9789-f7bc204119db"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.830218 4703 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.830268 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.830284 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ph2pq\" (UniqueName: \"kubernetes.io/projected/147c2570-9be4-4fb1-9789-f7bc204119db-kube-api-access-ph2pq\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.830302 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.830312 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.830320 4703 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/147c2570-9be4-4fb1-9789-f7bc204119db-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.953026 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-8nnnq" Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.953016 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-8nnnq" event={"ID":"147c2570-9be4-4fb1-9789-f7bc204119db","Type":"ContainerDied","Data":"a087cfabfbe230a4e382cb0d1c5d53987df3f81f6dfcb8f058109e5488568eef"} Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.953205 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a087cfabfbe230a4e382cb0d1c5d53987df3f81f6dfcb8f058109e5488568eef" Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.957992 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-676db989fc-4rthl" event={"ID":"62d4e769-8427-473f-8184-be89133bb4bc","Type":"ContainerStarted","Data":"a598bc6d257190579fae80d1f2fe82b84ac0ae7ac2e04a7cb34954a853b0fd27"} Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.958212 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:19:58 crc kubenswrapper[4703]: I0130 12:19:58.995017 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-676db989fc-4rthl" podStartSLOduration=6.994986312 podStartE2EDuration="6.994986312s" podCreationTimestamp="2026-01-30 12:19:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:19:58.983523603 +0000 UTC m=+1434.761345247" watchObservedRunningTime="2026-01-30 12:19:58.994986312 +0000 UTC m=+1434.772807956" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.196137 4703 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/keystone-7946f9d8b5-42hg8"] Jan 30 12:19:59 crc kubenswrapper[4703]: E0130 12:19:59.197675 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="147c2570-9be4-4fb1-9789-f7bc204119db" containerName="keystone-bootstrap" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.197707 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="147c2570-9be4-4fb1-9789-f7bc204119db" containerName="keystone-bootstrap" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.198035 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="147c2570-9be4-4fb1-9789-f7bc204119db" containerName="keystone-bootstrap" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.199099 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.202209 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-cz6x7" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.202634 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.202916 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.203232 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.204082 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.205543 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.241236 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7946f9d8b5-42hg8"] Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.343098 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-internal-tls-certs\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.344201 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-credential-keys\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.344334 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqwnv\" (UniqueName: \"kubernetes.io/projected/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-kube-api-access-sqwnv\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.344433 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-scripts\") pod \"keystone-7946f9d8b5-42hg8\" (UID: 
\"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.344561 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-public-tls-certs\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.344901 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-fernet-keys\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.344969 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-combined-ca-bundle\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.345041 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-config-data\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.448507 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-internal-tls-certs\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.448577 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-credential-keys\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.448605 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqwnv\" (UniqueName: \"kubernetes.io/projected/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-kube-api-access-sqwnv\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.448627 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-scripts\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.448688 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-public-tls-certs\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") 
" pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.448748 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-combined-ca-bundle\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.448770 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-fernet-keys\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.448800 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-config-data\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.455384 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-public-tls-certs\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.456757 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-credential-keys\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.457295 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-config-data\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.462800 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-internal-tls-certs\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.463706 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-scripts\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.469335 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-combined-ca-bundle\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.477923 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-sqwnv\" (UniqueName: \"kubernetes.io/projected/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-kube-api-access-sqwnv\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.480285 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/67c4cf21-bf63-4d34-a72a-4e881bcc2c7d-fernet-keys\") pod \"keystone-7946f9d8b5-42hg8\" (UID: \"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d\") " pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:19:59 crc kubenswrapper[4703]: I0130 12:19:59.537721 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:20:00 crc kubenswrapper[4703]: I0130 12:20:00.503540 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5f9958979d-8h859" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.159:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.159:8443: connect: connection refused" Jan 30 12:20:00 crc kubenswrapper[4703]: I0130 12:20:00.881298 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-77fb4cf9b8-pw692" podUID="9c6d3262-7469-45ac-b5c8-9eb0f9456a5a" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.160:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.160:8443: connect: connection refused" Jan 30 12:20:02 crc kubenswrapper[4703]: I0130 12:20:02.491508 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 30 12:20:02 crc kubenswrapper[4703]: I0130 12:20:02.491595 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 30 12:20:02 crc kubenswrapper[4703]: I0130 12:20:02.537930 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 30 12:20:02 crc kubenswrapper[4703]: I0130 12:20:02.561351 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 30 12:20:02 crc kubenswrapper[4703]: I0130 12:20:02.999198 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 30 12:20:02 crc kubenswrapper[4703]: I0130 12:20:02.999718 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 30 12:20:04 crc kubenswrapper[4703]: I0130 12:20:04.431811 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0" Jan 30 12:20:04 crc kubenswrapper[4703]: I0130 12:20:04.438391 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0" Jan 30 12:20:04 crc kubenswrapper[4703]: I0130 12:20:04.593717 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Jan 30 12:20:04 crc kubenswrapper[4703]: I0130 12:20:04.816294 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:20:05 crc kubenswrapper[4703]: I0130 12:20:05.006379 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-mdl7r"] Jan 30 12:20:05 crc kubenswrapper[4703]: I0130 
12:20:05.006826 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" podUID="638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" containerName="dnsmasq-dns" containerID="cri-o://4724661b321f6ce5f753731575ce55e816165e7242955e0a3f3e31646fd199b6" gracePeriod=10 Jan 30 12:20:06 crc kubenswrapper[4703]: I0130 12:20:06.110137 4703 generic.go:334] "Generic (PLEG): container finished" podID="638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" containerID="4724661b321f6ce5f753731575ce55e816165e7242955e0a3f3e31646fd199b6" exitCode=0 Jan 30 12:20:06 crc kubenswrapper[4703]: I0130 12:20:06.110814 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="41238b51-342b-4a59-b098-01f716f7a865" containerName="watcher-api-log" containerID="cri-o://158f5fe6d1c7b33cb21d36308c61f42bbb8ebc6e9fd9564ec1ab6f6dbeeefdf3" gracePeriod=30 Jan 30 12:20:06 crc kubenswrapper[4703]: I0130 12:20:06.110186 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" event={"ID":"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84","Type":"ContainerDied","Data":"4724661b321f6ce5f753731575ce55e816165e7242955e0a3f3e31646fd199b6"} Jan 30 12:20:06 crc kubenswrapper[4703]: I0130 12:20:06.111380 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-api-0" podUID="41238b51-342b-4a59-b098-01f716f7a865" containerName="watcher-api" containerID="cri-o://b1a6187e2c22c0e626d1088348b15d2fe1e5b8be71d34da8ab0aca0cd5dc69d9" gracePeriod=30 Jan 30 12:20:06 crc kubenswrapper[4703]: I0130 12:20:06.655094 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" podUID="638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.153:5353: connect: connection refused" Jan 30 12:20:07 crc kubenswrapper[4703]: I0130 12:20:07.125250 4703 generic.go:334] "Generic (PLEG): container finished" podID="41238b51-342b-4a59-b098-01f716f7a865" containerID="158f5fe6d1c7b33cb21d36308c61f42bbb8ebc6e9fd9564ec1ab6f6dbeeefdf3" exitCode=143 Jan 30 12:20:07 crc kubenswrapper[4703]: I0130 12:20:07.125333 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"41238b51-342b-4a59-b098-01f716f7a865","Type":"ContainerDied","Data":"158f5fe6d1c7b33cb21d36308c61f42bbb8ebc6e9fd9564ec1ab6f6dbeeefdf3"} Jan 30 12:20:09 crc kubenswrapper[4703]: I0130 12:20:09.550081 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="41238b51-342b-4a59-b098-01f716f7a865" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.162:9322/\": read tcp 10.217.0.2:57310->10.217.0.162:9322: read: connection reset by peer" Jan 30 12:20:09 crc kubenswrapper[4703]: I0130 12:20:09.550216 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="41238b51-342b-4a59-b098-01f716f7a865" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.162:9322/\": read tcp 10.217.0.2:57318->10.217.0.162:9322: read: connection reset by peer" Jan 30 12:20:10 crc kubenswrapper[4703]: I0130 12:20:10.501223 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5f9958979d-8h859" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.159:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.159:8443: 
connect: connection refused" Jan 30 12:20:10 crc kubenswrapper[4703]: I0130 12:20:10.879500 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-77fb4cf9b8-pw692" podUID="9c6d3262-7469-45ac-b5c8-9eb0f9456a5a" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.160:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.160:8443: connect: connection refused" Jan 30 12:20:11 crc kubenswrapper[4703]: I0130 12:20:11.218638 4703 generic.go:334] "Generic (PLEG): container finished" podID="41238b51-342b-4a59-b098-01f716f7a865" containerID="b1a6187e2c22c0e626d1088348b15d2fe1e5b8be71d34da8ab0aca0cd5dc69d9" exitCode=0 Jan 30 12:20:11 crc kubenswrapper[4703]: I0130 12:20:11.219037 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"41238b51-342b-4a59-b098-01f716f7a865","Type":"ContainerDied","Data":"b1a6187e2c22c0e626d1088348b15d2fe1e5b8be71d34da8ab0aca0cd5dc69d9"} Jan 30 12:20:11 crc kubenswrapper[4703]: I0130 12:20:11.654725 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" podUID="638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.153:5353: connect: connection refused" Jan 30 12:20:12 crc kubenswrapper[4703]: I0130 12:20:12.230791 4703 generic.go:334] "Generic (PLEG): container finished" podID="273a2195-27a6-4229-b3c0-8c67d9fc4302" containerID="3a38b773280de96cef8739a5579de3e9421f01c64a9e3befcc4c24a982f1eead" exitCode=0 Jan 30 12:20:12 crc kubenswrapper[4703]: I0130 12:20:12.230848 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-mmqwb" event={"ID":"273a2195-27a6-4229-b3c0-8c67d9fc4302","Type":"ContainerDied","Data":"3a38b773280de96cef8739a5579de3e9421f01c64a9e3befcc4c24a982f1eead"} Jan 30 12:20:12 crc kubenswrapper[4703]: I0130 12:20:12.823101 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:20:12 crc kubenswrapper[4703]: I0130 12:20:12.823808 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:20:14 crc kubenswrapper[4703]: I0130 12:20:14.426744 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="41238b51-342b-4a59-b098-01f716f7a865" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.162:9322/\": dial tcp 10.217.0.162:9322: connect: connection refused" Jan 30 12:20:14 crc kubenswrapper[4703]: I0130 12:20:14.426744 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="41238b51-342b-4a59-b098-01f716f7a865" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.162:9322/\": dial tcp 10.217.0.162:9322: connect: connection refused" Jan 30 12:20:16 crc kubenswrapper[4703]: I0130 12:20:16.807085 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 30 12:20:16 crc kubenswrapper[4703]: I0130 12:20:16.808264 4703 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 30 12:20:17 crc kubenswrapper[4703]: I0130 12:20:17.291424 4703 generic.go:334] "Generic (PLEG): container finished" podID="2ac378ee-c96a-41cb-881b-0100056b27ad" containerID="6292b0000982418134967dc906695bb7c653583af79e35e4f551aa6955384f96" exitCode=137 Jan 30 12:20:17 crc kubenswrapper[4703]: I0130 12:20:17.291475 4703 generic.go:334] "Generic (PLEG): container finished" podID="2ac378ee-c96a-41cb-881b-0100056b27ad" containerID="8dc1baf060a43497da0bae36974ca6a96708b479d00c21dddb4892a9ac9ef5b5" exitCode=137 Jan 30 12:20:17 crc kubenswrapper[4703]: I0130 12:20:17.291497 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5bbf599fb5-drlk7" event={"ID":"2ac378ee-c96a-41cb-881b-0100056b27ad","Type":"ContainerDied","Data":"6292b0000982418134967dc906695bb7c653583af79e35e4f551aa6955384f96"} Jan 30 12:20:17 crc kubenswrapper[4703]: I0130 12:20:17.291554 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5bbf599fb5-drlk7" event={"ID":"2ac378ee-c96a-41cb-881b-0100056b27ad","Type":"ContainerDied","Data":"8dc1baf060a43497da0bae36974ca6a96708b479d00c21dddb4892a9ac9ef5b5"} Jan 30 12:20:18 crc kubenswrapper[4703]: I0130 12:20:18.309226 4703 generic.go:334] "Generic (PLEG): container finished" podID="6ea69f39-6623-4a6e-adff-6014bb7c749b" containerID="485b4e337950b032012f2a3276539bbaeec91bf21585acbd7dd0e778d654de83" exitCode=137 Jan 30 12:20:18 crc kubenswrapper[4703]: I0130 12:20:18.309647 4703 generic.go:334] "Generic (PLEG): container finished" podID="6ea69f39-6623-4a6e-adff-6014bb7c749b" containerID="b36c10043d0ee087fff4101d27a6cffd2fd7a36b90013c6d1dc568e780c494cc" exitCode=137 Jan 30 12:20:18 crc kubenswrapper[4703]: I0130 12:20:18.309304 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-594746f8cc-b7spb" event={"ID":"6ea69f39-6623-4a6e-adff-6014bb7c749b","Type":"ContainerDied","Data":"485b4e337950b032012f2a3276539bbaeec91bf21585acbd7dd0e778d654de83"} Jan 30 12:20:18 crc kubenswrapper[4703]: I0130 12:20:18.309711 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-594746f8cc-b7spb" event={"ID":"6ea69f39-6623-4a6e-adff-6014bb7c749b","Type":"ContainerDied","Data":"b36c10043d0ee087fff4101d27a6cffd2fd7a36b90013c6d1dc568e780c494cc"} Jan 30 12:20:18 crc kubenswrapper[4703]: I0130 12:20:18.320873 4703 generic.go:334] "Generic (PLEG): container finished" podID="f15d17d2-e8d6-49c8-a313-d815865271f0" containerID="3ba1d723bd951ffe42ed8058041695297f6d9eabdf3ecf2e535f33372bf4a446" exitCode=137 Jan 30 12:20:18 crc kubenswrapper[4703]: I0130 12:20:18.320923 4703 generic.go:334] "Generic (PLEG): container finished" podID="f15d17d2-e8d6-49c8-a313-d815865271f0" containerID="c9d34830c6c9528c2db9a2b08e868b06714aeb34fe386462412ba5b2fc0fcb0b" exitCode=137 Jan 30 12:20:18 crc kubenswrapper[4703]: I0130 12:20:18.320955 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85698bccdf-jvv85" event={"ID":"f15d17d2-e8d6-49c8-a313-d815865271f0","Type":"ContainerDied","Data":"3ba1d723bd951ffe42ed8058041695297f6d9eabdf3ecf2e535f33372bf4a446"} Jan 30 12:20:18 crc kubenswrapper[4703]: I0130 12:20:18.321049 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85698bccdf-jvv85" event={"ID":"f15d17d2-e8d6-49c8-a313-d815865271f0","Type":"ContainerDied","Data":"c9d34830c6c9528c2db9a2b08e868b06714aeb34fe386462412ba5b2fc0fcb0b"} Jan 30 12:20:20 crc kubenswrapper[4703]: 
I0130 12:20:20.026093 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.026806 4703 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.104559 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.202646 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-85fd6c789b-jdlgt" podUID="ac2f050c-f8d0-4ae5-9205-17e00f283e4b" containerName="neutron-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.206383 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-85fd6c789b-jdlgt" podUID="ac2f050c-f8d0-4ae5-9205-17e00f283e4b" containerName="neutron-api" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.206456 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-85fd6c789b-jdlgt" podUID="ac2f050c-f8d0-4ae5-9205-17e00f283e4b" containerName="neutron-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.502239 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5f9958979d-8h859" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.159:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.159:8443: connect: connection refused" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.503000 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.504081 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"44bd0eeab3c7b4371ebc41e5f7618026824b17804f7a8c0f1225abd278d6ca11"} pod="openstack/horizon-5f9958979d-8h859" containerMessage="Container horizon failed startup probe, will be restarted" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.504149 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5f9958979d-8h859" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" containerID="cri-o://44bd0eeab3c7b4371ebc41e5f7618026824b17804f7a8c0f1225abd278d6ca11" gracePeriod=30 Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.696240 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-mmqwb" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.805522 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7b9n9\" (UniqueName: \"kubernetes.io/projected/273a2195-27a6-4229-b3c0-8c67d9fc4302-kube-api-access-7b9n9\") pod \"273a2195-27a6-4229-b3c0-8c67d9fc4302\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.805633 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/273a2195-27a6-4229-b3c0-8c67d9fc4302-logs\") pod \"273a2195-27a6-4229-b3c0-8c67d9fc4302\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.805701 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/273a2195-27a6-4229-b3c0-8c67d9fc4302-scripts\") pod \"273a2195-27a6-4229-b3c0-8c67d9fc4302\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.805733 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/273a2195-27a6-4229-b3c0-8c67d9fc4302-combined-ca-bundle\") pod \"273a2195-27a6-4229-b3c0-8c67d9fc4302\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.806013 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/273a2195-27a6-4229-b3c0-8c67d9fc4302-config-data\") pod \"273a2195-27a6-4229-b3c0-8c67d9fc4302\" (UID: \"273a2195-27a6-4229-b3c0-8c67d9fc4302\") " Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.806114 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/273a2195-27a6-4229-b3c0-8c67d9fc4302-logs" (OuterVolumeSpecName: "logs") pod "273a2195-27a6-4229-b3c0-8c67d9fc4302" (UID: "273a2195-27a6-4229-b3c0-8c67d9fc4302"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.807644 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/273a2195-27a6-4229-b3c0-8c67d9fc4302-logs\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.831097 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/273a2195-27a6-4229-b3c0-8c67d9fc4302-scripts" (OuterVolumeSpecName: "scripts") pod "273a2195-27a6-4229-b3c0-8c67d9fc4302" (UID: "273a2195-27a6-4229-b3c0-8c67d9fc4302"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.831661 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/273a2195-27a6-4229-b3c0-8c67d9fc4302-kube-api-access-7b9n9" (OuterVolumeSpecName: "kube-api-access-7b9n9") pod "273a2195-27a6-4229-b3c0-8c67d9fc4302" (UID: "273a2195-27a6-4229-b3c0-8c67d9fc4302"). InnerVolumeSpecName "kube-api-access-7b9n9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.851353 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/273a2195-27a6-4229-b3c0-8c67d9fc4302-config-data" (OuterVolumeSpecName: "config-data") pod "273a2195-27a6-4229-b3c0-8c67d9fc4302" (UID: "273a2195-27a6-4229-b3c0-8c67d9fc4302"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.870687 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/273a2195-27a6-4229-b3c0-8c67d9fc4302-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "273a2195-27a6-4229-b3c0-8c67d9fc4302" (UID: "273a2195-27a6-4229-b3c0-8c67d9fc4302"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.880873 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-77fb4cf9b8-pw692" podUID="9c6d3262-7469-45ac-b5c8-9eb0f9456a5a" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.160:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.160:8443: connect: connection refused" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.881005 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.882288 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"a29c27b7fb233396218ace770f8222710c040473791f84a186e8849760d220e4"} pod="openstack/horizon-77fb4cf9b8-pw692" containerMessage="Container horizon failed startup probe, will be restarted" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.882351 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-77fb4cf9b8-pw692" podUID="9c6d3262-7469-45ac-b5c8-9eb0f9456a5a" containerName="horizon" containerID="cri-o://a29c27b7fb233396218ace770f8222710c040473791f84a186e8849760d220e4" gracePeriod=30 Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.909444 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/273a2195-27a6-4229-b3c0-8c67d9fc4302-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.909496 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7b9n9\" (UniqueName: \"kubernetes.io/projected/273a2195-27a6-4229-b3c0-8c67d9fc4302-kube-api-access-7b9n9\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.909507 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/273a2195-27a6-4229-b3c0-8c67d9fc4302-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:20 crc kubenswrapper[4703]: I0130 12:20:20.909519 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/273a2195-27a6-4229-b3c0-8c67d9fc4302-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:21 crc kubenswrapper[4703]: I0130 12:20:21.358551 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-mmqwb" 
event={"ID":"273a2195-27a6-4229-b3c0-8c67d9fc4302","Type":"ContainerDied","Data":"627251f02791c2b24b276cbac63b5751958cfaa5e2ac6fd4e551bd54197fe205"} Jan 30 12:20:21 crc kubenswrapper[4703]: I0130 12:20:21.358617 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="627251f02791c2b24b276cbac63b5751958cfaa5e2ac6fd4e551bd54197fe205" Jan 30 12:20:21 crc kubenswrapper[4703]: I0130 12:20:21.358657 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-mmqwb" Jan 30 12:20:21 crc kubenswrapper[4703]: I0130 12:20:21.667481 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" podUID="638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.153:5353: i/o timeout" Jan 30 12:20:21 crc kubenswrapper[4703]: I0130 12:20:21.667931 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:20:21 crc kubenswrapper[4703]: I0130 12:20:21.912541 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5bb45bd7f4-hsvwp"] Jan 30 12:20:21 crc kubenswrapper[4703]: E0130 12:20:21.914211 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="273a2195-27a6-4229-b3c0-8c67d9fc4302" containerName="placement-db-sync" Jan 30 12:20:21 crc kubenswrapper[4703]: I0130 12:20:21.914236 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="273a2195-27a6-4229-b3c0-8c67d9fc4302" containerName="placement-db-sync" Jan 30 12:20:21 crc kubenswrapper[4703]: I0130 12:20:21.914451 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="273a2195-27a6-4229-b3c0-8c67d9fc4302" containerName="placement-db-sync" Jan 30 12:20:21 crc kubenswrapper[4703]: I0130 12:20:21.915909 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:21 crc kubenswrapper[4703]: I0130 12:20:21.919546 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 30 12:20:21 crc kubenswrapper[4703]: I0130 12:20:21.919723 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-jwlw6" Jan 30 12:20:21 crc kubenswrapper[4703]: I0130 12:20:21.920038 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 30 12:20:21 crc kubenswrapper[4703]: I0130 12:20:21.927157 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 30 12:20:21 crc kubenswrapper[4703]: I0130 12:20:21.927167 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 30 12:20:21 crc kubenswrapper[4703]: I0130 12:20:21.938587 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5bb45bd7f4-hsvwp"] Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.043078 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ab801f17-5c1d-4e5d-9e0c-24778ca21833-logs\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.043166 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab801f17-5c1d-4e5d-9e0c-24778ca21833-scripts\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.043244 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cbtg\" (UniqueName: \"kubernetes.io/projected/ab801f17-5c1d-4e5d-9e0c-24778ca21833-kube-api-access-4cbtg\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.044006 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab801f17-5c1d-4e5d-9e0c-24778ca21833-config-data\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.044302 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab801f17-5c1d-4e5d-9e0c-24778ca21833-combined-ca-bundle\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.044472 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab801f17-5c1d-4e5d-9e0c-24778ca21833-internal-tls-certs\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.044563 4703 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab801f17-5c1d-4e5d-9e0c-24778ca21833-public-tls-certs\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.147205 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab801f17-5c1d-4e5d-9e0c-24778ca21833-scripts\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.147305 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cbtg\" (UniqueName: \"kubernetes.io/projected/ab801f17-5c1d-4e5d-9e0c-24778ca21833-kube-api-access-4cbtg\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.147461 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab801f17-5c1d-4e5d-9e0c-24778ca21833-config-data\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.147514 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab801f17-5c1d-4e5d-9e0c-24778ca21833-combined-ca-bundle\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.147558 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab801f17-5c1d-4e5d-9e0c-24778ca21833-internal-tls-certs\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.147584 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab801f17-5c1d-4e5d-9e0c-24778ca21833-public-tls-certs\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.147610 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ab801f17-5c1d-4e5d-9e0c-24778ca21833-logs\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.148180 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ab801f17-5c1d-4e5d-9e0c-24778ca21833-logs\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.153496 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/ab801f17-5c1d-4e5d-9e0c-24778ca21833-scripts\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.154590 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab801f17-5c1d-4e5d-9e0c-24778ca21833-combined-ca-bundle\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.154910 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab801f17-5c1d-4e5d-9e0c-24778ca21833-internal-tls-certs\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.157547 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab801f17-5c1d-4e5d-9e0c-24778ca21833-public-tls-certs\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.167244 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab801f17-5c1d-4e5d-9e0c-24778ca21833-config-data\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.173785 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cbtg\" (UniqueName: \"kubernetes.io/projected/ab801f17-5c1d-4e5d-9e0c-24778ca21833-kube-api-access-4cbtg\") pod \"placement-5bb45bd7f4-hsvwp\" (UID: \"ab801f17-5c1d-4e5d-9e0c-24778ca21833\") " pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.241512 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5bb45bd7f4-hsvwp" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.401572 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" event={"ID":"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84","Type":"ContainerDied","Data":"237f7167f7136778eeb1feafe9ae884bdf8f50a35f08763339b9784e19e600ce"} Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.401640 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="237f7167f7136778eeb1feafe9ae884bdf8f50a35f08763339b9784e19e600ce" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.403731 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.435632 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"41238b51-342b-4a59-b098-01f716f7a865","Type":"ContainerDied","Data":"c276dd1cf7e3ed08da8320c83c94bd7e506f2ae962be81a3c7d0f78277d62ddd"} Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.435700 4703 scope.go:117] "RemoveContainer" containerID="b1a6187e2c22c0e626d1088348b15d2fe1e5b8be71d34da8ab0aca0cd5dc69d9" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.438174 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.457035 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41238b51-342b-4a59-b098-01f716f7a865-combined-ca-bundle\") pod \"41238b51-342b-4a59-b098-01f716f7a865\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.457254 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41238b51-342b-4a59-b098-01f716f7a865-config-data\") pod \"41238b51-342b-4a59-b098-01f716f7a865\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.457291 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41238b51-342b-4a59-b098-01f716f7a865-logs\") pod \"41238b51-342b-4a59-b098-01f716f7a865\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.457398 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5msjb\" (UniqueName: \"kubernetes.io/projected/41238b51-342b-4a59-b098-01f716f7a865-kube-api-access-5msjb\") pod \"41238b51-342b-4a59-b098-01f716f7a865\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.457444 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/41238b51-342b-4a59-b098-01f716f7a865-custom-prometheus-ca\") pod \"41238b51-342b-4a59-b098-01f716f7a865\" (UID: \"41238b51-342b-4a59-b098-01f716f7a865\") " Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.461763 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41238b51-342b-4a59-b098-01f716f7a865-logs" (OuterVolumeSpecName: "logs") pod "41238b51-342b-4a59-b098-01f716f7a865" (UID: "41238b51-342b-4a59-b098-01f716f7a865"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.508906 4703 scope.go:117] "RemoveContainer" containerID="158f5fe6d1c7b33cb21d36308c61f42bbb8ebc6e9fd9564ec1ab6f6dbeeefdf3" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.515014 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41238b51-342b-4a59-b098-01f716f7a865-kube-api-access-5msjb" (OuterVolumeSpecName: "kube-api-access-5msjb") pod "41238b51-342b-4a59-b098-01f716f7a865" (UID: "41238b51-342b-4a59-b098-01f716f7a865"). InnerVolumeSpecName "kube-api-access-5msjb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.523068 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41238b51-342b-4a59-b098-01f716f7a865-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "41238b51-342b-4a59-b098-01f716f7a865" (UID: "41238b51-342b-4a59-b098-01f716f7a865"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.549660 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41238b51-342b-4a59-b098-01f716f7a865-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "41238b51-342b-4a59-b098-01f716f7a865" (UID: "41238b51-342b-4a59-b098-01f716f7a865"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.564674 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-ovsdbserver-nb\") pod \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.564790 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-dns-swift-storage-0\") pod \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.564871 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-dns-svc\") pod \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.564915 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-ovsdbserver-sb\") pod \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.564946 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-config\") pod \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.565037 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hd8n5\" (UniqueName: \"kubernetes.io/projected/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-kube-api-access-hd8n5\") pod \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\" (UID: \"638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84\") " Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.565700 4703 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/41238b51-342b-4a59-b098-01f716f7a865-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.565720 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/41238b51-342b-4a59-b098-01f716f7a865-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.565733 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/41238b51-342b-4a59-b098-01f716f7a865-logs\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.565749 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5msjb\" (UniqueName: \"kubernetes.io/projected/41238b51-342b-4a59-b098-01f716f7a865-kube-api-access-5msjb\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.572618 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-kube-api-access-hd8n5" (OuterVolumeSpecName: "kube-api-access-hd8n5") pod "638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" (UID: "638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84"). InnerVolumeSpecName "kube-api-access-hd8n5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.623491 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41238b51-342b-4a59-b098-01f716f7a865-config-data" (OuterVolumeSpecName: "config-data") pod "41238b51-342b-4a59-b098-01f716f7a865" (UID: "41238b51-342b-4a59-b098-01f716f7a865"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.672323 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41238b51-342b-4a59-b098-01f716f7a865-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.672389 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hd8n5\" (UniqueName: \"kubernetes.io/projected/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-kube-api-access-hd8n5\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.818722 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" (UID: "638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:20:22 crc kubenswrapper[4703]: I0130 12:20:22.877174 4703 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.021728 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7946f9d8b5-42hg8"] Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.036637 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-config" (OuterVolumeSpecName: "config") pod "638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" (UID: "638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.077274 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" (UID: "638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:20:23 crc kubenswrapper[4703]: W0130 12:20:23.094987 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod67c4cf21_bf63_4d34_a72a_4e881bcc2c7d.slice/crio-643c826e0a8a698f274d50241f85cad110a1cec6fec0d40e8eb00cce32e7f518 WatchSource:0}: Error finding container 643c826e0a8a698f274d50241f85cad110a1cec6fec0d40e8eb00cce32e7f518: Status 404 returned error can't find the container with id 643c826e0a8a698f274d50241f85cad110a1cec6fec0d40e8eb00cce32e7f518 Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.185954 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" (UID: "638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.212347 4703 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.212736 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.212760 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.232829 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" (UID: "638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.266502 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-676db989fc-4rthl" podUID="62d4e769-8427-473f-8184-be89133bb4bc" containerName="neutron-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.276641 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-676db989fc-4rthl" podUID="62d4e769-8427-473f-8184-be89133bb4bc" containerName="neutron-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.278328 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-676db989fc-4rthl" podUID="62d4e769-8427-473f-8184-be89133bb4bc" containerName="neutron-api" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.315693 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.375674 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5bb45bd7f4-hsvwp"] Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.449047 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.460086 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5bb45bd7f4-hsvwp" event={"ID":"ab801f17-5c1d-4e5d-9e0c-24778ca21833","Type":"ContainerStarted","Data":"7ad36517988bcd7730eaad21c1eefca84666c916d03c69f1e8f7c2fa6c9fc510"} Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.470720 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.471708 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7946f9d8b5-42hg8" event={"ID":"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d","Type":"ContainerStarted","Data":"643c826e0a8a698f274d50241f85cad110a1cec6fec0d40e8eb00cce32e7f518"} Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.518372 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-api-0"] Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.526719 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-api-0"] Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.581981 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-api-0"] Jan 30 12:20:23 crc kubenswrapper[4703]: E0130 12:20:23.582554 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" containerName="init" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.582572 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" containerName="init" Jan 30 12:20:23 crc kubenswrapper[4703]: E0130 12:20:23.582589 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41238b51-342b-4a59-b098-01f716f7a865" containerName="watcher-api" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.582595 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="41238b51-342b-4a59-b098-01f716f7a865" containerName="watcher-api" Jan 30 12:20:23 crc kubenswrapper[4703]: E0130 12:20:23.582622 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" containerName="dnsmasq-dns" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.582632 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" containerName="dnsmasq-dns" Jan 30 12:20:23 crc kubenswrapper[4703]: E0130 12:20:23.582654 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41238b51-342b-4a59-b098-01f716f7a865" containerName="watcher-api-log" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.582661 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="41238b51-342b-4a59-b098-01f716f7a865" containerName="watcher-api-log" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.582868 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" containerName="dnsmasq-dns" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.582890 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="41238b51-342b-4a59-b098-01f716f7a865" containerName="watcher-api-log" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.582900 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="41238b51-342b-4a59-b098-01f716f7a865" containerName="watcher-api" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.585645 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.592693 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-internal-svc" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.592925 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-watcher-public-svc" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.593057 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-api-config-data" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.619575 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-mdl7r"] Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.643823 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-785d8bcb8c-mdl7r"] Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.654860 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58251080-4018-4188-9136-fa8e49f90aa3-public-tls-certs\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.654984 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58251080-4018-4188-9136-fa8e49f90aa3-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.655115 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58251080-4018-4188-9136-fa8e49f90aa3-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.655213 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26qd6\" (UniqueName: \"kubernetes.io/projected/58251080-4018-4188-9136-fa8e49f90aa3-kube-api-access-26qd6\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.655253 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58251080-4018-4188-9136-fa8e49f90aa3-config-data\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.655418 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58251080-4018-4188-9136-fa8e49f90aa3-logs\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.655486 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/58251080-4018-4188-9136-fa8e49f90aa3-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 
12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.658052 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.758173 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58251080-4018-4188-9136-fa8e49f90aa3-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.758560 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26qd6\" (UniqueName: \"kubernetes.io/projected/58251080-4018-4188-9136-fa8e49f90aa3-kube-api-access-26qd6\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.758589 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58251080-4018-4188-9136-fa8e49f90aa3-config-data\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.758684 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58251080-4018-4188-9136-fa8e49f90aa3-logs\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.758738 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/58251080-4018-4188-9136-fa8e49f90aa3-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.758773 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58251080-4018-4188-9136-fa8e49f90aa3-public-tls-certs\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.758816 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58251080-4018-4188-9136-fa8e49f90aa3-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.759443 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58251080-4018-4188-9136-fa8e49f90aa3-logs\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.778651 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58251080-4018-4188-9136-fa8e49f90aa3-combined-ca-bundle\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.782513 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/58251080-4018-4188-9136-fa8e49f90aa3-config-data\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.786225 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/58251080-4018-4188-9136-fa8e49f90aa3-custom-prometheus-ca\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.803245 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58251080-4018-4188-9136-fa8e49f90aa3-public-tls-certs\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.808656 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26qd6\" (UniqueName: \"kubernetes.io/projected/58251080-4018-4188-9136-fa8e49f90aa3-kube-api-access-26qd6\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.809336 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58251080-4018-4188-9136-fa8e49f90aa3-internal-tls-certs\") pod \"watcher-api-0\" (UID: \"58251080-4018-4188-9136-fa8e49f90aa3\") " pod="openstack/watcher-api-0" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.827673 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-594746f8cc-b7spb" Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.867610 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6ea69f39-6623-4a6e-adff-6014bb7c749b-config-data\") pod \"6ea69f39-6623-4a6e-adff-6014bb7c749b\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.868038 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dvbbp\" (UniqueName: \"kubernetes.io/projected/6ea69f39-6623-4a6e-adff-6014bb7c749b-kube-api-access-dvbbp\") pod \"6ea69f39-6623-4a6e-adff-6014bb7c749b\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.868534 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6ea69f39-6623-4a6e-adff-6014bb7c749b-scripts\") pod \"6ea69f39-6623-4a6e-adff-6014bb7c749b\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.868694 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6ea69f39-6623-4a6e-adff-6014bb7c749b-horizon-secret-key\") pod \"6ea69f39-6623-4a6e-adff-6014bb7c749b\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.868869 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ea69f39-6623-4a6e-adff-6014bb7c749b-logs\") pod \"6ea69f39-6623-4a6e-adff-6014bb7c749b\" (UID: \"6ea69f39-6623-4a6e-adff-6014bb7c749b\") " Jan 30 
Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.870269 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ea69f39-6623-4a6e-adff-6014bb7c749b-logs" (OuterVolumeSpecName: "logs") pod "6ea69f39-6623-4a6e-adff-6014bb7c749b" (UID: "6ea69f39-6623-4a6e-adff-6014bb7c749b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.885744 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea69f39-6623-4a6e-adff-6014bb7c749b-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "6ea69f39-6623-4a6e-adff-6014bb7c749b" (UID: "6ea69f39-6623-4a6e-adff-6014bb7c749b"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.903754 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea69f39-6623-4a6e-adff-6014bb7c749b-config-data" (OuterVolumeSpecName: "config-data") pod "6ea69f39-6623-4a6e-adff-6014bb7c749b" (UID: "6ea69f39-6623-4a6e-adff-6014bb7c749b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.904388 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea69f39-6623-4a6e-adff-6014bb7c749b-kube-api-access-dvbbp" (OuterVolumeSpecName: "kube-api-access-dvbbp") pod "6ea69f39-6623-4a6e-adff-6014bb7c749b" (UID: "6ea69f39-6623-4a6e-adff-6014bb7c749b"). InnerVolumeSpecName "kube-api-access-dvbbp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.917763 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea69f39-6623-4a6e-adff-6014bb7c749b-scripts" (OuterVolumeSpecName: "scripts") pod "6ea69f39-6623-4a6e-adff-6014bb7c749b" (UID: "6ea69f39-6623-4a6e-adff-6014bb7c749b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.963635 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-api-0"
Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.972725 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6ea69f39-6623-4a6e-adff-6014bb7c749b-config-data\") on node \"crc\" DevicePath \"\""
Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.973176 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dvbbp\" (UniqueName: \"kubernetes.io/projected/6ea69f39-6623-4a6e-adff-6014bb7c749b-kube-api-access-dvbbp\") on node \"crc\" DevicePath \"\""
Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.973309 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6ea69f39-6623-4a6e-adff-6014bb7c749b-scripts\") on node \"crc\" DevicePath \"\""
Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.973401 4703 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6ea69f39-6623-4a6e-adff-6014bb7c749b-horizon-secret-key\") on node \"crc\" DevicePath \"\""
Jan 30 12:20:23 crc kubenswrapper[4703]: I0130 12:20:23.973479 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6ea69f39-6623-4a6e-adff-6014bb7c749b-logs\") on node \"crc\" DevicePath \"\""
Jan 30 12:20:24 crc kubenswrapper[4703]: I0130 12:20:24.426400 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="41238b51-342b-4a59-b098-01f716f7a865" containerName="watcher-api" probeResult="failure" output="Get \"http://10.217.0.162:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 30 12:20:24 crc kubenswrapper[4703]: I0130 12:20:24.426530 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/watcher-api-0" podUID="41238b51-342b-4a59-b098-01f716f7a865" containerName="watcher-api-log" probeResult="failure" output="Get \"http://10.217.0.162:9322/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 30 12:20:24 crc kubenswrapper[4703]: I0130 12:20:24.484747 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-594746f8cc-b7spb" event={"ID":"6ea69f39-6623-4a6e-adff-6014bb7c749b","Type":"ContainerDied","Data":"a890a73d141d07cd66317fd9f8a89cd5039e4040f5a063824abb086ee524cafc"}
Jan 30 12:20:24 crc kubenswrapper[4703]: I0130 12:20:24.484807 4703 scope.go:117] "RemoveContainer" containerID="485b4e337950b032012f2a3276539bbaeec91bf21585acbd7dd0e778d654de83"
Jan 30 12:20:24 crc kubenswrapper[4703]: I0130 12:20:24.484871 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-594746f8cc-b7spb"
Jan 30 12:20:24 crc kubenswrapper[4703]: I0130 12:20:24.530852 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-594746f8cc-b7spb"]
Jan 30 12:20:24 crc kubenswrapper[4703]: I0130 12:20:24.540090 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-594746f8cc-b7spb"]
Jan 30 12:20:25 crc kubenswrapper[4703]: I0130 12:20:25.100521 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41238b51-342b-4a59-b098-01f716f7a865" path="/var/lib/kubelet/pods/41238b51-342b-4a59-b098-01f716f7a865/volumes"
Jan 30 12:20:25 crc kubenswrapper[4703]: I0130 12:20:25.101727 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" path="/var/lib/kubelet/pods/638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84/volumes"
Jan 30 12:20:25 crc kubenswrapper[4703]: I0130 12:20:25.102721 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea69f39-6623-4a6e-adff-6014bb7c749b" path="/var/lib/kubelet/pods/6ea69f39-6623-4a6e-adff-6014bb7c749b/volumes"
Jan 30 12:20:26 crc kubenswrapper[4703]: I0130 12:20:26.668074 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-785d8bcb8c-mdl7r" podUID="638f2b9b-25b1-40b5-bb6a-78a9e5e8fa84" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.153:5353: i/o timeout"
Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.805254 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-85698bccdf-jvv85"
Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.812086 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5bbf599fb5-drlk7"
Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.833450 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f15d17d2-e8d6-49c8-a313-d815865271f0-logs\") pod \"f15d17d2-e8d6-49c8-a313-d815865271f0\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") "
Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.833975 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wdltv\" (UniqueName: \"kubernetes.io/projected/f15d17d2-e8d6-49c8-a313-d815865271f0-kube-api-access-wdltv\") pod \"f15d17d2-e8d6-49c8-a313-d815865271f0\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") "
Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.834005 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f15d17d2-e8d6-49c8-a313-d815865271f0-config-data\") pod \"f15d17d2-e8d6-49c8-a313-d815865271f0\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") "
Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.834077 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f15d17d2-e8d6-49c8-a313-d815865271f0-scripts\") pod \"f15d17d2-e8d6-49c8-a313-d815865271f0\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") "
Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.834234 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f15d17d2-e8d6-49c8-a313-d815865271f0-horizon-secret-key\") pod \"f15d17d2-e8d6-49c8-a313-d815865271f0\" (UID: \"f15d17d2-e8d6-49c8-a313-d815865271f0\") "
\"f15d17d2-e8d6-49c8-a313-d815865271f0\") " Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.837455 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f15d17d2-e8d6-49c8-a313-d815865271f0-logs" (OuterVolumeSpecName: "logs") pod "f15d17d2-e8d6-49c8-a313-d815865271f0" (UID: "f15d17d2-e8d6-49c8-a313-d815865271f0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.843762 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f15d17d2-e8d6-49c8-a313-d815865271f0-kube-api-access-wdltv" (OuterVolumeSpecName: "kube-api-access-wdltv") pod "f15d17d2-e8d6-49c8-a313-d815865271f0" (UID: "f15d17d2-e8d6-49c8-a313-d815865271f0"). InnerVolumeSpecName "kube-api-access-wdltv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.871516 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f15d17d2-e8d6-49c8-a313-d815865271f0-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "f15d17d2-e8d6-49c8-a313-d815865271f0" (UID: "f15d17d2-e8d6-49c8-a313-d815865271f0"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.880628 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f15d17d2-e8d6-49c8-a313-d815865271f0-scripts" (OuterVolumeSpecName: "scripts") pod "f15d17d2-e8d6-49c8-a313-d815865271f0" (UID: "f15d17d2-e8d6-49c8-a313-d815865271f0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.923016 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f15d17d2-e8d6-49c8-a313-d815865271f0-config-data" (OuterVolumeSpecName: "config-data") pod "f15d17d2-e8d6-49c8-a313-d815865271f0" (UID: "f15d17d2-e8d6-49c8-a313-d815865271f0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.925553 4703 scope.go:117] "RemoveContainer" containerID="b36c10043d0ee087fff4101d27a6cffd2fd7a36b90013c6d1dc568e780c494cc" Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.936573 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2ac378ee-c96a-41cb-881b-0100056b27ad-scripts\") pod \"2ac378ee-c96a-41cb-881b-0100056b27ad\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.936717 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ac378ee-c96a-41cb-881b-0100056b27ad-logs\") pod \"2ac378ee-c96a-41cb-881b-0100056b27ad\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.936846 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2ac378ee-c96a-41cb-881b-0100056b27ad-horizon-secret-key\") pod \"2ac378ee-c96a-41cb-881b-0100056b27ad\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.936953 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2ac378ee-c96a-41cb-881b-0100056b27ad-config-data\") pod \"2ac378ee-c96a-41cb-881b-0100056b27ad\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.937109 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gkld4\" (UniqueName: \"kubernetes.io/projected/2ac378ee-c96a-41cb-881b-0100056b27ad-kube-api-access-gkld4\") pod \"2ac378ee-c96a-41cb-881b-0100056b27ad\" (UID: \"2ac378ee-c96a-41cb-881b-0100056b27ad\") " Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.937516 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ac378ee-c96a-41cb-881b-0100056b27ad-logs" (OuterVolumeSpecName: "logs") pod "2ac378ee-c96a-41cb-881b-0100056b27ad" (UID: "2ac378ee-c96a-41cb-881b-0100056b27ad"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.938412 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wdltv\" (UniqueName: \"kubernetes.io/projected/f15d17d2-e8d6-49c8-a313-d815865271f0-kube-api-access-wdltv\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.938523 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f15d17d2-e8d6-49c8-a313-d815865271f0-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.938607 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f15d17d2-e8d6-49c8-a313-d815865271f0-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.938691 4703 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/f15d17d2-e8d6-49c8-a313-d815865271f0-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.938789 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f15d17d2-e8d6-49c8-a313-d815865271f0-logs\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.938873 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ac378ee-c96a-41cb-881b-0100056b27ad-logs\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.941033 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ac378ee-c96a-41cb-881b-0100056b27ad-kube-api-access-gkld4" (OuterVolumeSpecName: "kube-api-access-gkld4") pod "2ac378ee-c96a-41cb-881b-0100056b27ad" (UID: "2ac378ee-c96a-41cb-881b-0100056b27ad"). InnerVolumeSpecName "kube-api-access-gkld4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.942480 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ac378ee-c96a-41cb-881b-0100056b27ad-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "2ac378ee-c96a-41cb-881b-0100056b27ad" (UID: "2ac378ee-c96a-41cb-881b-0100056b27ad"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.963040 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ac378ee-c96a-41cb-881b-0100056b27ad-config-data" (OuterVolumeSpecName: "config-data") pod "2ac378ee-c96a-41cb-881b-0100056b27ad" (UID: "2ac378ee-c96a-41cb-881b-0100056b27ad"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:20:30 crc kubenswrapper[4703]: I0130 12:20:30.963049 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ac378ee-c96a-41cb-881b-0100056b27ad-scripts" (OuterVolumeSpecName: "scripts") pod "2ac378ee-c96a-41cb-881b-0100056b27ad" (UID: "2ac378ee-c96a-41cb-881b-0100056b27ad"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:20:31 crc kubenswrapper[4703]: I0130 12:20:31.040534 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2ac378ee-c96a-41cb-881b-0100056b27ad-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:31 crc kubenswrapper[4703]: I0130 12:20:31.040582 4703 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/2ac378ee-c96a-41cb-881b-0100056b27ad-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:31 crc kubenswrapper[4703]: I0130 12:20:31.040595 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2ac378ee-c96a-41cb-881b-0100056b27ad-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:31 crc kubenswrapper[4703]: I0130 12:20:31.040605 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gkld4\" (UniqueName: \"kubernetes.io/projected/2ac378ee-c96a-41cb-881b-0100056b27ad-kube-api-access-gkld4\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:31 crc kubenswrapper[4703]: I0130 12:20:31.569203 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5bbf599fb5-drlk7" Jan 30 12:20:31 crc kubenswrapper[4703]: I0130 12:20:31.569183 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5bbf599fb5-drlk7" event={"ID":"2ac378ee-c96a-41cb-881b-0100056b27ad","Type":"ContainerDied","Data":"4d06a5d6bf066d28fa37820403815da2077b5c8170ade5776487434b0130eb74"} Jan 30 12:20:31 crc kubenswrapper[4703]: I0130 12:20:31.569894 4703 scope.go:117] "RemoveContainer" containerID="6292b0000982418134967dc906695bb7c653583af79e35e4f551aa6955384f96" Jan 30 12:20:31 crc kubenswrapper[4703]: I0130 12:20:31.578930 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-85698bccdf-jvv85" event={"ID":"f15d17d2-e8d6-49c8-a313-d815865271f0","Type":"ContainerDied","Data":"c705c7a0f3a02f2118d4b0fa102e7bc7394b936a8c5c4cd901d4b4aa2bf61ce4"} Jan 30 12:20:31 crc kubenswrapper[4703]: I0130 12:20:31.579077 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-85698bccdf-jvv85" Jan 30 12:20:31 crc kubenswrapper[4703]: I0130 12:20:31.613764 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5bbf599fb5-drlk7"] Jan 30 12:20:31 crc kubenswrapper[4703]: I0130 12:20:31.624660 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5bbf599fb5-drlk7"] Jan 30 12:20:31 crc kubenswrapper[4703]: I0130 12:20:31.638606 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-85698bccdf-jvv85"] Jan 30 12:20:31 crc kubenswrapper[4703]: I0130 12:20:31.649097 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-85698bccdf-jvv85"] Jan 30 12:20:31 crc kubenswrapper[4703]: I0130 12:20:31.777592 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-api-0"] Jan 30 12:20:31 crc kubenswrapper[4703]: I0130 12:20:31.811480 4703 scope.go:117] "RemoveContainer" containerID="8dc1baf060a43497da0bae36974ca6a96708b479d00c21dddb4892a9ac9ef5b5" Jan 30 12:20:31 crc kubenswrapper[4703]: I0130 12:20:31.886530 4703 scope.go:117] "RemoveContainer" containerID="3ba1d723bd951ffe42ed8058041695297f6d9eabdf3ecf2e535f33372bf4a446" Jan 30 12:20:32 crc kubenswrapper[4703]: I0130 12:20:32.133110 4703 scope.go:117] "RemoveContainer" containerID="c9d34830c6c9528c2db9a2b08e868b06714aeb34fe386462412ba5b2fc0fcb0b" Jan 30 12:20:32 crc kubenswrapper[4703]: I0130 12:20:32.594386 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"58251080-4018-4188-9136-fa8e49f90aa3","Type":"ContainerStarted","Data":"7d4c6bfdd4d1f97a2518bb3b2b9ef9b33b1814e3e7b130a8399ecc8c9de0994f"} Jan 30 12:20:33 crc kubenswrapper[4703]: I0130 12:20:33.100817 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ac378ee-c96a-41cb-881b-0100056b27ad" path="/var/lib/kubelet/pods/2ac378ee-c96a-41cb-881b-0100056b27ad/volumes" Jan 30 12:20:33 crc kubenswrapper[4703]: I0130 12:20:33.102054 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f15d17d2-e8d6-49c8-a313-d815865271f0" path="/var/lib/kubelet/pods/f15d17d2-e8d6-49c8-a313-d815865271f0/volumes" Jan 30 12:20:34 crc kubenswrapper[4703]: I0130 12:20:34.622916 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5bb45bd7f4-hsvwp" event={"ID":"ab801f17-5c1d-4e5d-9e0c-24778ca21833","Type":"ContainerStarted","Data":"090c37abe876cd3e13bc6c7c8571a2797a737e5e31ef0dcabbf5a87a33afe9d3"} Jan 30 12:20:34 crc kubenswrapper[4703]: I0130 12:20:34.625809 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7946f9d8b5-42hg8" event={"ID":"67c4cf21-bf63-4d34-a72a-4e881bcc2c7d","Type":"ContainerStarted","Data":"32de908354f1d3e2a06c22c9769b95ed5c4b1aca8bdacf49ca91a0e8adad3048"} Jan 30 12:20:34 crc kubenswrapper[4703]: I0130 12:20:34.629344 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-scrgs" event={"ID":"1665113c-fcaa-4a13-9de2-552579864e44","Type":"ContainerStarted","Data":"9da643b5dca57ef27b2b62dc081db040f72572014de6521b390288855be18261"} Jan 30 12:20:34 crc kubenswrapper[4703]: I0130 12:20:34.631994 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-vkzk9" event={"ID":"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70","Type":"ContainerStarted","Data":"015a87566bb4a87ce6fd94f2a64e317d0a52bcdbe92e15a06a75e4ce2b5ac316"} Jan 30 12:20:34 crc kubenswrapper[4703]: I0130 12:20:34.635393 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/watcher-api-0" event={"ID":"58251080-4018-4188-9136-fa8e49f90aa3","Type":"ContainerStarted","Data":"a8679231a1ae0c41dc3b8dc0b43398822727bed509f2aba64b70b95297bdf5bd"} Jan 30 12:20:34 crc kubenswrapper[4703]: I0130 12:20:34.666818 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-vkzk9" podStartSLOduration=16.0155184 podStartE2EDuration="1m44.666785512s" podCreationTimestamp="2026-01-30 12:18:50 +0000 UTC" firstStartedPulling="2026-01-30 12:18:53.562614016 +0000 UTC m=+1369.340435670" lastFinishedPulling="2026-01-30 12:20:22.213881128 +0000 UTC m=+1457.991702782" observedRunningTime="2026-01-30 12:20:34.657850459 +0000 UTC m=+1470.435672113" watchObservedRunningTime="2026-01-30 12:20:34.666785512 +0000 UTC m=+1470.444607166" Jan 30 12:20:35 crc kubenswrapper[4703]: E0130 12:20:35.431355 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/sg-core:latest" Jan 30 12:20:35 crc kubenswrapper[4703]: E0130 12:20:35.431571 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:sg-core,Image:quay.io/openstack-k8s-operators/sg-core:latest,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:sg-core-conf-yaml,ReadOnly:false,MountPath:/etc/sg-core.conf.yaml,SubPath:sg-core.conf.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9gzxc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(572a81eb-68df-470a-9ca8-1febfc6167ad): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:20:35 crc kubenswrapper[4703]: I0130 12:20:35.437261 4703 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 30 12:20:35 crc kubenswrapper[4703]: I0130 12:20:35.656146 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-7946f9d8b5-42hg8" Jan 30 12:20:35 crc kubenswrapper[4703]: I0130 12:20:35.691899 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-7946f9d8b5-42hg8" podStartSLOduration=36.691862149 podStartE2EDuration="36.691862149s" podCreationTimestamp="2026-01-30 12:19:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:20:35.68187908 +0000 UTC m=+1471.459700734" watchObservedRunningTime="2026-01-30 12:20:35.691862149 +0000 UTC m=+1471.469683803" Jan 30 12:20:35 crc kubenswrapper[4703]: 
Jan 30 12:20:35 crc kubenswrapper[4703]: I0130 12:20:35.715461 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-scrgs" podStartSLOduration=17.063801803 podStartE2EDuration="1m46.715426642s" podCreationTimestamp="2026-01-30 12:18:49 +0000 UTC" firstStartedPulling="2026-01-30 12:18:52.650904552 +0000 UTC m=+1368.428726206" lastFinishedPulling="2026-01-30 12:20:22.302529391 +0000 UTC m=+1458.080351045" observedRunningTime="2026-01-30 12:20:35.702776603 +0000 UTC m=+1471.480598257" watchObservedRunningTime="2026-01-30 12:20:35.715426642 +0000 UTC m=+1471.493248296"
Jan 30 12:20:36 crc kubenswrapper[4703]: I0130 12:20:36.669992 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-api-0" event={"ID":"58251080-4018-4188-9136-fa8e49f90aa3","Type":"ContainerStarted","Data":"1c27f7d247c107462688117b6d3ebe67ae903be603fc65d8348e82a3c856d071"}
Jan 30 12:20:36 crc kubenswrapper[4703]: I0130 12:20:36.671338 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0"
Jan 30 12:20:36 crc kubenswrapper[4703]: I0130 12:20:36.676592 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5bb45bd7f4-hsvwp" event={"ID":"ab801f17-5c1d-4e5d-9e0c-24778ca21833","Type":"ContainerStarted","Data":"22d4f6e993d118fd593bf2f5c810e7c952b6c898f17729d7e052479f1a3b2e89"}
Jan 30 12:20:36 crc kubenswrapper[4703]: I0130 12:20:36.676671 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5bb45bd7f4-hsvwp"
Jan 30 12:20:36 crc kubenswrapper[4703]: I0130 12:20:36.676709 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5bb45bd7f4-hsvwp"
Jan 30 12:20:36 crc kubenswrapper[4703]: I0130 12:20:36.712910 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-api-0" podStartSLOduration=13.712879452 podStartE2EDuration="13.712879452s" podCreationTimestamp="2026-01-30 12:20:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:20:36.698282013 +0000 UTC m=+1472.476103677" watchObservedRunningTime="2026-01-30 12:20:36.712879452 +0000 UTC m=+1472.490701096"
Jan 30 12:20:36 crc kubenswrapper[4703]: I0130 12:20:36.746931 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-5bb45bd7f4-hsvwp" podStartSLOduration=15.746893035 podStartE2EDuration="15.746893035s" podCreationTimestamp="2026-01-30 12:20:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:20:36.731407383 +0000 UTC m=+1472.509229057" watchObservedRunningTime="2026-01-30 12:20:36.746893035 +0000 UTC m=+1472.524714709"
Jan 30 12:20:38 crc kubenswrapper[4703]: I0130 12:20:38.964868 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-api-0"
Jan 30 12:20:38 crc kubenswrapper[4703]: I0130 12:20:38.965460 4703 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 30 12:20:39 crc kubenswrapper[4703]: I0130 12:20:39.163895 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0"
Jan 30 12:20:42 crc kubenswrapper[4703]: I0130 12:20:42.822942 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 30 12:20:42 crc kubenswrapper[4703]: I0130 12:20:42.823611 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 30 12:20:43 crc kubenswrapper[4703]: I0130 12:20:43.964633 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-api-0"
Jan 30 12:20:43 crc kubenswrapper[4703]: I0130 12:20:43.976337 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-api-0"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.763210 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rs9pr"]
Jan 30 12:20:44 crc kubenswrapper[4703]: E0130 12:20:44.763983 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ea69f39-6623-4a6e-adff-6014bb7c749b" containerName="horizon"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.764016 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ea69f39-6623-4a6e-adff-6014bb7c749b" containerName="horizon"
Jan 30 12:20:44 crc kubenswrapper[4703]: E0130 12:20:44.764052 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ac378ee-c96a-41cb-881b-0100056b27ad" containerName="horizon-log"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.764062 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ac378ee-c96a-41cb-881b-0100056b27ad" containerName="horizon-log"
Jan 30 12:20:44 crc kubenswrapper[4703]: E0130 12:20:44.764091 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ea69f39-6623-4a6e-adff-6014bb7c749b" containerName="horizon-log"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.764101 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ea69f39-6623-4a6e-adff-6014bb7c749b" containerName="horizon-log"
Jan 30 12:20:44 crc kubenswrapper[4703]: E0130 12:20:44.764138 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ac378ee-c96a-41cb-881b-0100056b27ad" containerName="horizon"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.764145 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ac378ee-c96a-41cb-881b-0100056b27ad" containerName="horizon"
Jan 30 12:20:44 crc kubenswrapper[4703]: E0130 12:20:44.764169 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f15d17d2-e8d6-49c8-a313-d815865271f0" containerName="horizon"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.764177 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f15d17d2-e8d6-49c8-a313-d815865271f0" containerName="horizon"
Jan 30 12:20:44 crc kubenswrapper[4703]: E0130 12:20:44.764193 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f15d17d2-e8d6-49c8-a313-d815865271f0" containerName="horizon-log"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.764201 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f15d17d2-e8d6-49c8-a313-d815865271f0" containerName="horizon-log"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.764398 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="f15d17d2-e8d6-49c8-a313-d815865271f0" containerName="horizon-log"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.764414 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ac378ee-c96a-41cb-881b-0100056b27ad" containerName="horizon"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.764427 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ac378ee-c96a-41cb-881b-0100056b27ad" containerName="horizon-log"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.764440 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ea69f39-6623-4a6e-adff-6014bb7c749b" containerName="horizon-log"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.764449 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="f15d17d2-e8d6-49c8-a313-d815865271f0" containerName="horizon"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.764463 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ea69f39-6623-4a6e-adff-6014bb7c749b" containerName="horizon"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.766199 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rs9pr"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.788843 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rs9pr"]
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.833381 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-api-0"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.861945 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7-catalog-content\") pod \"redhat-operators-rs9pr\" (UID: \"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7\") " pod="openshift-marketplace/redhat-operators-rs9pr"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.862109 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vd9rn\" (UniqueName: \"kubernetes.io/projected/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7-kube-api-access-vd9rn\") pod \"redhat-operators-rs9pr\" (UID: \"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7\") " pod="openshift-marketplace/redhat-operators-rs9pr"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.862154 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7-utilities\") pod \"redhat-operators-rs9pr\" (UID: \"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7\") " pod="openshift-marketplace/redhat-operators-rs9pr"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.964443 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7-utilities\") pod \"redhat-operators-rs9pr\" (UID: \"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7\") " pod="openshift-marketplace/redhat-operators-rs9pr"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.964620 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7-catalog-content\") pod \"redhat-operators-rs9pr\" (UID: \"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7\") " pod="openshift-marketplace/redhat-operators-rs9pr"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.964857 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vd9rn\" (UniqueName: \"kubernetes.io/projected/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7-kube-api-access-vd9rn\") pod \"redhat-operators-rs9pr\" (UID: \"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7\") " pod="openshift-marketplace/redhat-operators-rs9pr"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.965195 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7-utilities\") pod \"redhat-operators-rs9pr\" (UID: \"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7\") " pod="openshift-marketplace/redhat-operators-rs9pr"
Jan 30 12:20:44 crc kubenswrapper[4703]: I0130 12:20:44.965631 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7-catalog-content\") pod \"redhat-operators-rs9pr\" (UID: \"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7\") " pod="openshift-marketplace/redhat-operators-rs9pr"
Jan 30 12:20:45 crc kubenswrapper[4703]: I0130 12:20:45.002195 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vd9rn\" (UniqueName: \"kubernetes.io/projected/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7-kube-api-access-vd9rn\") pod \"redhat-operators-rs9pr\" (UID: \"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7\") " pod="openshift-marketplace/redhat-operators-rs9pr"
Jan 30 12:20:45 crc kubenswrapper[4703]: I0130 12:20:45.109984 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rs9pr"
Jan 30 12:20:49 crc kubenswrapper[4703]: E0130 12:20:49.799767 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"sg-core\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack/ceilometer-0" podUID="572a81eb-68df-470a-9ca8-1febfc6167ad"
Jan 30 12:20:49 crc kubenswrapper[4703]: I0130 12:20:49.814508 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rs9pr"]
Jan 30 12:20:49 crc kubenswrapper[4703]: W0130 12:20:49.819071 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7d3ec3ba_8eec_4f98_8948_6cf24d5638b7.slice/crio-2f9f47d26b6c056d8b2354e5fefd86ff9110ab9e314c17600789a2e9393a6153 WatchSource:0}: Error finding container 2f9f47d26b6c056d8b2354e5fefd86ff9110ab9e314c17600789a2e9393a6153: Status 404 returned error can't find the container with id 2f9f47d26b6c056d8b2354e5fefd86ff9110ab9e314c17600789a2e9393a6153
Jan 30 12:20:49 crc kubenswrapper[4703]: I0130 12:20:49.889424 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"572a81eb-68df-470a-9ca8-1febfc6167ad","Type":"ContainerStarted","Data":"e45423bb48b132ba7e486e33996b065b164471f574f6dbe34b326e3be4a01e94"}
Jan 30 12:20:49 crc kubenswrapper[4703]: I0130 12:20:49.889622 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 30 12:20:49 crc kubenswrapper[4703]: I0130 12:20:49.889680 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="572a81eb-68df-470a-9ca8-1febfc6167ad" containerName="proxy-httpd" containerID="cri-o://e45423bb48b132ba7e486e33996b065b164471f574f6dbe34b326e3be4a01e94" gracePeriod=30
Jan 30 12:20:49 crc kubenswrapper[4703]: I0130 12:20:49.889680 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="572a81eb-68df-470a-9ca8-1febfc6167ad" containerName="ceilometer-notification-agent" containerID="cri-o://37a63d0f9e4cd08d66ff7b2a23c08291f9552b813721b66cae5547fe2c7dfabb" gracePeriod=30
Jan 30 12:20:49 crc kubenswrapper[4703]: I0130 12:20:49.895812 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rs9pr" event={"ID":"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7","Type":"ContainerStarted","Data":"2f9f47d26b6c056d8b2354e5fefd86ff9110ab9e314c17600789a2e9393a6153"}
Jan 30 12:20:50 crc kubenswrapper[4703]: I0130 12:20:50.186557 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-85fd6c789b-jdlgt" podUID="ac2f050c-f8d0-4ae5-9205-17e00f283e4b" containerName="neutron-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Jan 30 12:20:50 crc kubenswrapper[4703]: I0130 12:20:50.186924 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-85fd6c789b-jdlgt" podUID="ac2f050c-f8d0-4ae5-9205-17e00f283e4b" containerName="neutron-api" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Jan 30 12:20:50 crc kubenswrapper[4703]: I0130 12:20:50.188086 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/neutron-85fd6c789b-jdlgt" podUID="ac2f050c-f8d0-4ae5-9205-17e00f283e4b" containerName="neutron-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503"
Jan 30 12:20:50 crc kubenswrapper[4703]: I0130 12:20:50.909579 4703 generic.go:334] "Generic (PLEG): container finished" podID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerID="44bd0eeab3c7b4371ebc41e5f7618026824b17804f7a8c0f1225abd278d6ca11" exitCode=137
Jan 30 12:20:50 crc kubenswrapper[4703]: I0130 12:20:50.909660 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5f9958979d-8h859" event={"ID":"b888ea51-970d-4f4d-9e5c-f456ca173472","Type":"ContainerDied","Data":"44bd0eeab3c7b4371ebc41e5f7618026824b17804f7a8c0f1225abd278d6ca11"}
Jan 30 12:20:50 crc kubenswrapper[4703]: I0130 12:20:50.912777 4703 generic.go:334] "Generic (PLEG): container finished" podID="7d3ec3ba-8eec-4f98-8948-6cf24d5638b7" containerID="f0bc4d0cf50dadc27cbff3e7da907925bf792d2179e6e754dccf330f69c8291b" exitCode=0
Jan 30 12:20:50 crc kubenswrapper[4703]: I0130 12:20:50.912817 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rs9pr" event={"ID":"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7","Type":"ContainerDied","Data":"f0bc4d0cf50dadc27cbff3e7da907925bf792d2179e6e754dccf330f69c8291b"}
Jan 30 12:20:51 crc kubenswrapper[4703]: I0130 12:20:51.926273 4703 generic.go:334] "Generic (PLEG): container finished" podID="572a81eb-68df-470a-9ca8-1febfc6167ad" containerID="37a63d0f9e4cd08d66ff7b2a23c08291f9552b813721b66cae5547fe2c7dfabb" exitCode=0
Jan 30 12:20:51 crc kubenswrapper[4703]: I0130 12:20:51.926403 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"572a81eb-68df-470a-9ca8-1febfc6167ad","Type":"ContainerDied","Data":"37a63d0f9e4cd08d66ff7b2a23c08291f9552b813721b66cae5547fe2c7dfabb"}
Jan 30 12:20:51 crc kubenswrapper[4703]: I0130 12:20:51.931469 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5f9958979d-8h859" event={"ID":"b888ea51-970d-4f4d-9e5c-f456ca173472","Type":"ContainerStarted","Data":"d757dd822a89b950ce6e9d4dc97199e2572ee94ccf2beca2ecace35453f877f7"}
Jan 30 12:20:51 crc kubenswrapper[4703]: I0130 12:20:51.936245 4703 generic.go:334] "Generic (PLEG): container finished" podID="9c6d3262-7469-45ac-b5c8-9eb0f9456a5a" containerID="a29c27b7fb233396218ace770f8222710c040473791f84a186e8849760d220e4" exitCode=137
Jan 30 12:20:51 crc kubenswrapper[4703]: I0130 12:20:51.936314 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77fb4cf9b8-pw692" event={"ID":"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a","Type":"ContainerDied","Data":"a29c27b7fb233396218ace770f8222710c040473791f84a186e8849760d220e4"}
Jan 30 12:20:52 crc kubenswrapper[4703]: I0130 12:20:52.949692 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77fb4cf9b8-pw692" event={"ID":"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a","Type":"ContainerStarted","Data":"fda2400a9ade21756ca56d52218ebcddb5692194c691d158ef06b11e6b0694d2"}
Jan 30 12:20:54 crc kubenswrapper[4703]: I0130 12:20:54.978414 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rs9pr" event={"ID":"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7","Type":"ContainerStarted","Data":"e8a16a34faaf526bb60f2149ff055b4011dee35837306d7acf0f3543329b1789"}
Jan 30 12:20:55 crc kubenswrapper[4703]: I0130 12:20:55.594740 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5bb45bd7f4-hsvwp"
Jan 30 12:20:55 crc kubenswrapper[4703]: I0130 12:20:55.896444 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5bb45bd7f4-hsvwp"
Jan 30 12:20:56 crc kubenswrapper[4703]: E0130 12:20:56.803377 4703 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7d3ec3ba_8eec_4f98_8948_6cf24d5638b7.slice/crio-conmon-e8a16a34faaf526bb60f2149ff055b4011dee35837306d7acf0f3543329b1789.scope\": RecentStats: unable to find data in memory cache]"
Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.014615 4703 generic.go:334] "Generic (PLEG): container finished" podID="7d3ec3ba-8eec-4f98-8948-6cf24d5638b7" containerID="e8a16a34faaf526bb60f2149ff055b4011dee35837306d7acf0f3543329b1789" exitCode=0
Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.014689 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rs9pr" event={"ID":"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7","Type":"ContainerDied","Data":"e8a16a34faaf526bb60f2149ff055b4011dee35837306d7acf0f3543329b1789"}
Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.280045 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-676db989fc-4rthl"
Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.353487 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-85fd6c789b-jdlgt"]
Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.353865 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-85fd6c789b-jdlgt" podUID="ac2f050c-f8d0-4ae5-9205-17e00f283e4b" containerName="neutron-api" containerID="cri-o://7cac79a2b4c4c93950b47a0d1e832c934582542d6673b18d7481bb7237de22c3" gracePeriod=30
Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.354047 4703 kuberuntime_container.go:808] "Killing container 
with a grace period" pod="openstack/neutron-85fd6c789b-jdlgt" podUID="ac2f050c-f8d0-4ae5-9205-17e00f283e4b" containerName="neutron-httpd" containerID="cri-o://f78b0f9d84606f39b963ac7f2e9ea287421ab9ebf7d40a6a73dbb116f96c8667" gracePeriod=30 Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.371782 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-85fd6c789b-jdlgt" podUID="ac2f050c-f8d0-4ae5-9205-17e00f283e4b" containerName="neutron-httpd" probeResult="failure" output="Get \"http://10.217.0.168:9696/\": EOF" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.636023 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7ccfb848f-k68gw"] Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.639062 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.667437 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/68a6efb3-cffb-4397-922b-c8486b399f76-public-tls-certs\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.667509 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/68a6efb3-cffb-4397-922b-c8486b399f76-internal-tls-certs\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.667656 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68a6efb3-cffb-4397-922b-c8486b399f76-combined-ca-bundle\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.667742 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/68a6efb3-cffb-4397-922b-c8486b399f76-ovndb-tls-certs\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.668068 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/68a6efb3-cffb-4397-922b-c8486b399f76-httpd-config\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.668186 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfz7w\" (UniqueName: \"kubernetes.io/projected/68a6efb3-cffb-4397-922b-c8486b399f76-kube-api-access-bfz7w\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.668352 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/68a6efb3-cffb-4397-922b-c8486b399f76-config\") pod 
\"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.680928 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7ccfb848f-k68gw"] Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.771727 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/68a6efb3-cffb-4397-922b-c8486b399f76-config\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.772190 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/68a6efb3-cffb-4397-922b-c8486b399f76-public-tls-certs\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.772452 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/68a6efb3-cffb-4397-922b-c8486b399f76-internal-tls-certs\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.772632 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68a6efb3-cffb-4397-922b-c8486b399f76-combined-ca-bundle\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.772853 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/68a6efb3-cffb-4397-922b-c8486b399f76-ovndb-tls-certs\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.773076 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/68a6efb3-cffb-4397-922b-c8486b399f76-httpd-config\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.773603 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfz7w\" (UniqueName: \"kubernetes.io/projected/68a6efb3-cffb-4397-922b-c8486b399f76-kube-api-access-bfz7w\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.784110 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68a6efb3-cffb-4397-922b-c8486b399f76-combined-ca-bundle\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.784611 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/68a6efb3-cffb-4397-922b-c8486b399f76-config\") pod 
\"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.784160 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/68a6efb3-cffb-4397-922b-c8486b399f76-httpd-config\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.784971 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/68a6efb3-cffb-4397-922b-c8486b399f76-ovndb-tls-certs\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.790560 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/68a6efb3-cffb-4397-922b-c8486b399f76-internal-tls-certs\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.790917 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/68a6efb3-cffb-4397-922b-c8486b399f76-public-tls-certs\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:57 crc kubenswrapper[4703]: I0130 12:20:57.799866 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfz7w\" (UniqueName: \"kubernetes.io/projected/68a6efb3-cffb-4397-922b-c8486b399f76-kube-api-access-bfz7w\") pod \"neutron-7ccfb848f-k68gw\" (UID: \"68a6efb3-cffb-4397-922b-c8486b399f76\") " pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:58 crc kubenswrapper[4703]: I0130 12:20:58.034354 4703 generic.go:334] "Generic (PLEG): container finished" podID="b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70" containerID="015a87566bb4a87ce6fd94f2a64e317d0a52bcdbe92e15a06a75e4ce2b5ac316" exitCode=0 Jan 30 12:20:58 crc kubenswrapper[4703]: I0130 12:20:58.034465 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-vkzk9" event={"ID":"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70","Type":"ContainerDied","Data":"015a87566bb4a87ce6fd94f2a64e317d0a52bcdbe92e15a06a75e4ce2b5ac316"} Jan 30 12:20:58 crc kubenswrapper[4703]: I0130 12:20:58.037170 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85fd6c789b-jdlgt" event={"ID":"ac2f050c-f8d0-4ae5-9205-17e00f283e4b","Type":"ContainerDied","Data":"f78b0f9d84606f39b963ac7f2e9ea287421ab9ebf7d40a6a73dbb116f96c8667"} Jan 30 12:20:58 crc kubenswrapper[4703]: I0130 12:20:58.037113 4703 generic.go:334] "Generic (PLEG): container finished" podID="ac2f050c-f8d0-4ae5-9205-17e00f283e4b" containerID="f78b0f9d84606f39b963ac7f2e9ea287421ab9ebf7d40a6a73dbb116f96c8667" exitCode=0 Jan 30 12:20:58 crc kubenswrapper[4703]: I0130 12:20:58.041646 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rs9pr" event={"ID":"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7","Type":"ContainerStarted","Data":"70f5c889b4c2bd9dd1520ea57b9c934dadf736c579867781c71e8645c9b8cca7"} Jan 30 12:20:58 crc kubenswrapper[4703]: I0130 12:20:58.048216 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:20:58 crc kubenswrapper[4703]: I0130 12:20:58.090699 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rs9pr" podStartSLOduration=7.532394881 podStartE2EDuration="14.090670953s" podCreationTimestamp="2026-01-30 12:20:44 +0000 UTC" firstStartedPulling="2026-01-30 12:20:50.914987733 +0000 UTC m=+1486.692809387" lastFinishedPulling="2026-01-30 12:20:57.473263805 +0000 UTC m=+1493.251085459" observedRunningTime="2026-01-30 12:20:58.08591143 +0000 UTC m=+1493.863733094" watchObservedRunningTime="2026-01-30 12:20:58.090670953 +0000 UTC m=+1493.868492607" Jan 30 12:20:58 crc kubenswrapper[4703]: I0130 12:20:58.587871 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7ccfb848f-k68gw"] Jan 30 12:20:58 crc kubenswrapper[4703]: W0130 12:20:58.597300 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod68a6efb3_cffb_4397_922b_c8486b399f76.slice/crio-da5aeac035a0317124dbff46a42a7b466da84c46b7ad42d22f4a2e75a13de9ca WatchSource:0}: Error finding container da5aeac035a0317124dbff46a42a7b466da84c46b7ad42d22f4a2e75a13de9ca: Status 404 returned error can't find the container with id da5aeac035a0317124dbff46a42a7b466da84c46b7ad42d22f4a2e75a13de9ca Jan 30 12:20:59 crc kubenswrapper[4703]: I0130 12:20:59.071389 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ccfb848f-k68gw" event={"ID":"68a6efb3-cffb-4397-922b-c8486b399f76","Type":"ContainerStarted","Data":"457a381cb779d80f46d710fab7789935dfb15507d38a8d12f78bdb48483c054f"} Jan 30 12:20:59 crc kubenswrapper[4703]: I0130 12:20:59.072290 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ccfb848f-k68gw" event={"ID":"68a6efb3-cffb-4397-922b-c8486b399f76","Type":"ContainerStarted","Data":"da5aeac035a0317124dbff46a42a7b466da84c46b7ad42d22f4a2e75a13de9ca"} Jan 30 12:20:59 crc kubenswrapper[4703]: I0130 12:20:59.076017 4703 generic.go:334] "Generic (PLEG): container finished" podID="ac2f050c-f8d0-4ae5-9205-17e00f283e4b" containerID="7cac79a2b4c4c93950b47a0d1e832c934582542d6673b18d7481bb7237de22c3" exitCode=0 Jan 30 12:20:59 crc kubenswrapper[4703]: I0130 12:20:59.076117 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85fd6c789b-jdlgt" event={"ID":"ac2f050c-f8d0-4ae5-9205-17e00f283e4b","Type":"ContainerDied","Data":"7cac79a2b4c4c93950b47a0d1e832c934582542d6673b18d7481bb7237de22c3"} Jan 30 12:20:59 crc kubenswrapper[4703]: I0130 12:20:59.489393 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-vkzk9" Jan 30 12:20:59 crc kubenswrapper[4703]: I0130 12:20:59.649069 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xmlb5\" (UniqueName: \"kubernetes.io/projected/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70-kube-api-access-xmlb5\") pod \"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70\" (UID: \"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70\") " Jan 30 12:20:59 crc kubenswrapper[4703]: I0130 12:20:59.649314 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70-db-sync-config-data\") pod \"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70\" (UID: \"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70\") " Jan 30 12:20:59 crc kubenswrapper[4703]: I0130 12:20:59.649580 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70-combined-ca-bundle\") pod \"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70\" (UID: \"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70\") " Jan 30 12:20:59 crc kubenswrapper[4703]: I0130 12:20:59.683953 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70" (UID: "b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:20:59 crc kubenswrapper[4703]: I0130 12:20:59.692570 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70-kube-api-access-xmlb5" (OuterVolumeSpecName: "kube-api-access-xmlb5") pod "b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70" (UID: "b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70"). InnerVolumeSpecName "kube-api-access-xmlb5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:20:59 crc kubenswrapper[4703]: I0130 12:20:59.765501 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xmlb5\" (UniqueName: \"kubernetes.io/projected/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70-kube-api-access-xmlb5\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:59 crc kubenswrapper[4703]: I0130 12:20:59.765557 4703 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:59 crc kubenswrapper[4703]: I0130 12:20:59.793337 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70" (UID: "b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:20:59 crc kubenswrapper[4703]: I0130 12:20:59.868818 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:20:59 crc kubenswrapper[4703]: I0130 12:20:59.948503 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.072194 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-combined-ca-bundle\") pod \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.072364 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-config\") pod \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.072528 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-httpd-config\") pod \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.072650 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nbpzz\" (UniqueName: \"kubernetes.io/projected/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-kube-api-access-nbpzz\") pod \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.072713 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-ovndb-tls-certs\") pod \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\" (UID: \"ac2f050c-f8d0-4ae5-9205-17e00f283e4b\") " Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.078952 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "ac2f050c-f8d0-4ae5-9205-17e00f283e4b" (UID: "ac2f050c-f8d0-4ae5-9205-17e00f283e4b"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.087732 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-kube-api-access-nbpzz" (OuterVolumeSpecName: "kube-api-access-nbpzz") pod "ac2f050c-f8d0-4ae5-9205-17e00f283e4b" (UID: "ac2f050c-f8d0-4ae5-9205-17e00f283e4b"). InnerVolumeSpecName "kube-api-access-nbpzz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.107585 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-85fd6c789b-jdlgt" event={"ID":"ac2f050c-f8d0-4ae5-9205-17e00f283e4b","Type":"ContainerDied","Data":"dd55e773356b424c5681116df4ad8d66a6e372f1c854aa3546d3b5d9302523bb"} Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.107934 4703 scope.go:117] "RemoveContainer" containerID="f78b0f9d84606f39b963ac7f2e9ea287421ab9ebf7d40a6a73dbb116f96c8667" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.108275 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-85fd6c789b-jdlgt" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.177479 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7ccfb848f-k68gw" event={"ID":"68a6efb3-cffb-4397-922b-c8486b399f76","Type":"ContainerStarted","Data":"7bb15379819905bc12bf4eeeb3ce31a679db715ab651ff933f3047d7b254ee52"} Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.177601 4703 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.178379 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nbpzz\" (UniqueName: \"kubernetes.io/projected/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-kube-api-access-nbpzz\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.179678 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.183360 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ac2f050c-f8d0-4ae5-9205-17e00f283e4b" (UID: "ac2f050c-f8d0-4ae5-9205-17e00f283e4b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.191149 4703 generic.go:334] "Generic (PLEG): container finished" podID="1665113c-fcaa-4a13-9de2-552579864e44" containerID="9da643b5dca57ef27b2b62dc081db040f72572014de6521b390288855be18261" exitCode=0 Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.191356 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-scrgs" event={"ID":"1665113c-fcaa-4a13-9de2-552579864e44","Type":"ContainerDied","Data":"9da643b5dca57ef27b2b62dc081db040f72572014de6521b390288855be18261"} Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.198695 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-vkzk9" event={"ID":"b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70","Type":"ContainerDied","Data":"f36d6b4492ef8975f36878ebcb27e717feaec0146fd9720b3a0fbbbcbada8bea"} Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.198768 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f36d6b4492ef8975f36878ebcb27e717feaec0146fd9720b3a0fbbbcbada8bea" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.198892 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-vkzk9" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.203508 4703 scope.go:117] "RemoveContainer" containerID="7cac79a2b4c4c93950b47a0d1e832c934582542d6673b18d7481bb7237de22c3" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.215380 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7ccfb848f-k68gw" podStartSLOduration=3.215340365 podStartE2EDuration="3.215340365s" podCreationTimestamp="2026-01-30 12:20:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:21:00.208733073 +0000 UTC m=+1495.986554737" watchObservedRunningTime="2026-01-30 12:21:00.215340365 +0000 UTC m=+1495.993162019" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.233023 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-config" (OuterVolumeSpecName: "config") pod "ac2f050c-f8d0-4ae5-9205-17e00f283e4b" (UID: "ac2f050c-f8d0-4ae5-9205-17e00f283e4b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.256879 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "ac2f050c-f8d0-4ae5-9205-17e00f283e4b" (UID: "ac2f050c-f8d0-4ae5-9205-17e00f283e4b"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.285794 4703 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.285838 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.285852 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/ac2f050c-f8d0-4ae5-9205-17e00f283e4b-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.497755 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-85fd6c789b-jdlgt"] Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.502084 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.520318 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-85fd6c789b-jdlgt"] Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.520402 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.556205 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-97468ff8-5bxdm"] Jan 30 12:21:00 crc kubenswrapper[4703]: E0130 12:21:00.556912 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70" containerName="barbican-db-sync" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 
12:21:00.556967 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70" containerName="barbican-db-sync" Jan 30 12:21:00 crc kubenswrapper[4703]: E0130 12:21:00.557021 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac2f050c-f8d0-4ae5-9205-17e00f283e4b" containerName="neutron-httpd" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.557035 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac2f050c-f8d0-4ae5-9205-17e00f283e4b" containerName="neutron-httpd" Jan 30 12:21:00 crc kubenswrapper[4703]: E0130 12:21:00.557051 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac2f050c-f8d0-4ae5-9205-17e00f283e4b" containerName="neutron-api" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.557059 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac2f050c-f8d0-4ae5-9205-17e00f283e4b" containerName="neutron-api" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.557346 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac2f050c-f8d0-4ae5-9205-17e00f283e4b" containerName="neutron-httpd" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.557369 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac2f050c-f8d0-4ae5-9205-17e00f283e4b" containerName="neutron-api" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.557380 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70" containerName="barbican-db-sync" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.558821 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.564598 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.564769 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-rtx45" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.568766 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.605816 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-57d7c7f95-fngdp"] Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.608666 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-57d7c7f95-fngdp" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.616642 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.628623 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-57d7c7f95-fngdp"] Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.678237 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-97468ff8-5bxdm"] Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.706807 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58305133-2318-4520-a3cc-bca7a1d61895-logs\") pod \"barbican-keystone-listener-97468ff8-5bxdm\" (UID: \"58305133-2318-4520-a3cc-bca7a1d61895\") " pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.706878 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/58305133-2318-4520-a3cc-bca7a1d61895-config-data-custom\") pod \"barbican-keystone-listener-97468ff8-5bxdm\" (UID: \"58305133-2318-4520-a3cc-bca7a1d61895\") " pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.706943 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58305133-2318-4520-a3cc-bca7a1d61895-config-data\") pod \"barbican-keystone-listener-97468ff8-5bxdm\" (UID: \"58305133-2318-4520-a3cc-bca7a1d61895\") " pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.706969 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f8d5f833-0ceb-4dc6-bb87-ce670386ef8b-config-data-custom\") pod \"barbican-worker-57d7c7f95-fngdp\" (UID: \"f8d5f833-0ceb-4dc6-bb87-ce670386ef8b\") " pod="openstack/barbican-worker-57d7c7f95-fngdp" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.707143 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsk7m\" (UniqueName: \"kubernetes.io/projected/f8d5f833-0ceb-4dc6-bb87-ce670386ef8b-kube-api-access-qsk7m\") pod \"barbican-worker-57d7c7f95-fngdp\" (UID: \"f8d5f833-0ceb-4dc6-bb87-ce670386ef8b\") " pod="openstack/barbican-worker-57d7c7f95-fngdp" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.707226 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8d5f833-0ceb-4dc6-bb87-ce670386ef8b-combined-ca-bundle\") pod \"barbican-worker-57d7c7f95-fngdp\" (UID: \"f8d5f833-0ceb-4dc6-bb87-ce670386ef8b\") " pod="openstack/barbican-worker-57d7c7f95-fngdp" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.707500 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pb6r\" (UniqueName: \"kubernetes.io/projected/58305133-2318-4520-a3cc-bca7a1d61895-kube-api-access-8pb6r\") pod \"barbican-keystone-listener-97468ff8-5bxdm\" (UID: \"58305133-2318-4520-a3cc-bca7a1d61895\") " 
pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.707653 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58305133-2318-4520-a3cc-bca7a1d61895-combined-ca-bundle\") pod \"barbican-keystone-listener-97468ff8-5bxdm\" (UID: \"58305133-2318-4520-a3cc-bca7a1d61895\") " pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.707687 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f8d5f833-0ceb-4dc6-bb87-ce670386ef8b-logs\") pod \"barbican-worker-57d7c7f95-fngdp\" (UID: \"f8d5f833-0ceb-4dc6-bb87-ce670386ef8b\") " pod="openstack/barbican-worker-57d7c7f95-fngdp" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.707736 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8d5f833-0ceb-4dc6-bb87-ce670386ef8b-config-data\") pod \"barbican-worker-57d7c7f95-fngdp\" (UID: \"f8d5f833-0ceb-4dc6-bb87-ce670386ef8b\") " pod="openstack/barbican-worker-57d7c7f95-fngdp" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.709804 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-scqz9"] Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.727227 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-scqz9" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.770640 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-scqz9"] Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.809754 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsk7m\" (UniqueName: \"kubernetes.io/projected/f8d5f833-0ceb-4dc6-bb87-ce670386ef8b-kube-api-access-qsk7m\") pod \"barbican-worker-57d7c7f95-fngdp\" (UID: \"f8d5f833-0ceb-4dc6-bb87-ce670386ef8b\") " pod="openstack/barbican-worker-57d7c7f95-fngdp" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.809829 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8d5f833-0ceb-4dc6-bb87-ce670386ef8b-combined-ca-bundle\") pod \"barbican-worker-57d7c7f95-fngdp\" (UID: \"f8d5f833-0ceb-4dc6-bb87-ce670386ef8b\") " pod="openstack/barbican-worker-57d7c7f95-fngdp" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.809917 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pb6r\" (UniqueName: \"kubernetes.io/projected/58305133-2318-4520-a3cc-bca7a1d61895-kube-api-access-8pb6r\") pod \"barbican-keystone-listener-97468ff8-5bxdm\" (UID: \"58305133-2318-4520-a3cc-bca7a1d61895\") " pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.809977 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58305133-2318-4520-a3cc-bca7a1d61895-combined-ca-bundle\") pod \"barbican-keystone-listener-97468ff8-5bxdm\" (UID: \"58305133-2318-4520-a3cc-bca7a1d61895\") " pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.809998 4703 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f8d5f833-0ceb-4dc6-bb87-ce670386ef8b-logs\") pod \"barbican-worker-57d7c7f95-fngdp\" (UID: \"f8d5f833-0ceb-4dc6-bb87-ce670386ef8b\") " pod="openstack/barbican-worker-57d7c7f95-fngdp" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.810024 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8d5f833-0ceb-4dc6-bb87-ce670386ef8b-config-data\") pod \"barbican-worker-57d7c7f95-fngdp\" (UID: \"f8d5f833-0ceb-4dc6-bb87-ce670386ef8b\") " pod="openstack/barbican-worker-57d7c7f95-fngdp" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.810115 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58305133-2318-4520-a3cc-bca7a1d61895-logs\") pod \"barbican-keystone-listener-97468ff8-5bxdm\" (UID: \"58305133-2318-4520-a3cc-bca7a1d61895\") " pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.810166 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/58305133-2318-4520-a3cc-bca7a1d61895-config-data-custom\") pod \"barbican-keystone-listener-97468ff8-5bxdm\" (UID: \"58305133-2318-4520-a3cc-bca7a1d61895\") " pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.810209 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58305133-2318-4520-a3cc-bca7a1d61895-config-data\") pod \"barbican-keystone-listener-97468ff8-5bxdm\" (UID: \"58305133-2318-4520-a3cc-bca7a1d61895\") " pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.810233 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f8d5f833-0ceb-4dc6-bb87-ce670386ef8b-config-data-custom\") pod \"barbican-worker-57d7c7f95-fngdp\" (UID: \"f8d5f833-0ceb-4dc6-bb87-ce670386ef8b\") " pod="openstack/barbican-worker-57d7c7f95-fngdp" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.811623 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f8d5f833-0ceb-4dc6-bb87-ce670386ef8b-logs\") pod \"barbican-worker-57d7c7f95-fngdp\" (UID: \"f8d5f833-0ceb-4dc6-bb87-ce670386ef8b\") " pod="openstack/barbican-worker-57d7c7f95-fngdp" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.818827 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58305133-2318-4520-a3cc-bca7a1d61895-logs\") pod \"barbican-keystone-listener-97468ff8-5bxdm\" (UID: \"58305133-2318-4520-a3cc-bca7a1d61895\") " pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.836804 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f8d5f833-0ceb-4dc6-bb87-ce670386ef8b-config-data-custom\") pod \"barbican-worker-57d7c7f95-fngdp\" (UID: \"f8d5f833-0ceb-4dc6-bb87-ce670386ef8b\") " pod="openstack/barbican-worker-57d7c7f95-fngdp" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.857180 4703 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8d5f833-0ceb-4dc6-bb87-ce670386ef8b-combined-ca-bundle\") pod \"barbican-worker-57d7c7f95-fngdp\" (UID: \"f8d5f833-0ceb-4dc6-bb87-ce670386ef8b\") " pod="openstack/barbican-worker-57d7c7f95-fngdp" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.858152 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/58305133-2318-4520-a3cc-bca7a1d61895-config-data-custom\") pod \"barbican-keystone-listener-97468ff8-5bxdm\" (UID: \"58305133-2318-4520-a3cc-bca7a1d61895\") " pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.858154 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8d5f833-0ceb-4dc6-bb87-ce670386ef8b-config-data\") pod \"barbican-worker-57d7c7f95-fngdp\" (UID: \"f8d5f833-0ceb-4dc6-bb87-ce670386ef8b\") " pod="openstack/barbican-worker-57d7c7f95-fngdp" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.858222 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58305133-2318-4520-a3cc-bca7a1d61895-combined-ca-bundle\") pod \"barbican-keystone-listener-97468ff8-5bxdm\" (UID: \"58305133-2318-4520-a3cc-bca7a1d61895\") " pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.859352 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58305133-2318-4520-a3cc-bca7a1d61895-config-data\") pod \"barbican-keystone-listener-97468ff8-5bxdm\" (UID: \"58305133-2318-4520-a3cc-bca7a1d61895\") " pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.868186 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pb6r\" (UniqueName: \"kubernetes.io/projected/58305133-2318-4520-a3cc-bca7a1d61895-kube-api-access-8pb6r\") pod \"barbican-keystone-listener-97468ff8-5bxdm\" (UID: \"58305133-2318-4520-a3cc-bca7a1d61895\") " pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.891056 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsk7m\" (UniqueName: \"kubernetes.io/projected/f8d5f833-0ceb-4dc6-bb87-ce670386ef8b-kube-api-access-qsk7m\") pod \"barbican-worker-57d7c7f95-fngdp\" (UID: \"f8d5f833-0ceb-4dc6-bb87-ce670386ef8b\") " pod="openstack/barbican-worker-57d7c7f95-fngdp" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.892349 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.893182 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.894563 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.914023 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-scqz9\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") " pod="openstack/dnsmasq-dns-85ff748b95-scqz9" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.914157 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-dns-svc\") pod \"dnsmasq-dns-85ff748b95-scqz9\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") " pod="openstack/dnsmasq-dns-85ff748b95-scqz9" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.914217 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-scqz9\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") " pod="openstack/dnsmasq-dns-85ff748b95-scqz9" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.914276 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4lndt\" (UniqueName: \"kubernetes.io/projected/c0a4e828-1f06-4be9-880c-eea97ff9ef11-kube-api-access-4lndt\") pod \"dnsmasq-dns-85ff748b95-scqz9\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") " pod="openstack/dnsmasq-dns-85ff748b95-scqz9" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.914330 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-config\") pod \"dnsmasq-dns-85ff748b95-scqz9\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") " pod="openstack/dnsmasq-dns-85ff748b95-scqz9" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.914555 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-scqz9\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") " pod="openstack/dnsmasq-dns-85ff748b95-scqz9" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.958247 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-655b7696db-jp55x"] Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.960605 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-655b7696db-jp55x" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.967756 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 30 12:21:00 crc kubenswrapper[4703]: I0130 12:21:00.978588 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-655b7696db-jp55x"] Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.016693 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-dns-svc\") pod \"dnsmasq-dns-85ff748b95-scqz9\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") " pod="openstack/dnsmasq-dns-85ff748b95-scqz9" Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.016796 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-scqz9\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") " pod="openstack/dnsmasq-dns-85ff748b95-scqz9" Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.016825 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4lndt\" (UniqueName: \"kubernetes.io/projected/c0a4e828-1f06-4be9-880c-eea97ff9ef11-kube-api-access-4lndt\") pod \"dnsmasq-dns-85ff748b95-scqz9\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") " pod="openstack/dnsmasq-dns-85ff748b95-scqz9" Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.016865 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-config\") pod \"dnsmasq-dns-85ff748b95-scqz9\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") " pod="openstack/dnsmasq-dns-85ff748b95-scqz9" Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.017049 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-scqz9\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") " pod="openstack/dnsmasq-dns-85ff748b95-scqz9" Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.017099 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-scqz9\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") " pod="openstack/dnsmasq-dns-85ff748b95-scqz9" Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.018325 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-ovsdbserver-nb\") pod \"dnsmasq-dns-85ff748b95-scqz9\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") " pod="openstack/dnsmasq-dns-85ff748b95-scqz9" Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.019219 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-dns-svc\") pod \"dnsmasq-dns-85ff748b95-scqz9\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") " pod="openstack/dnsmasq-dns-85ff748b95-scqz9" Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.020336 4703 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-ovsdbserver-sb\") pod \"dnsmasq-dns-85ff748b95-scqz9\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") " pod="openstack/dnsmasq-dns-85ff748b95-scqz9"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.030928 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-config\") pod \"dnsmasq-dns-85ff748b95-scqz9\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") " pod="openstack/dnsmasq-dns-85ff748b95-scqz9"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.045554 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-dns-swift-storage-0\") pod \"dnsmasq-dns-85ff748b95-scqz9\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") " pod="openstack/dnsmasq-dns-85ff748b95-scqz9"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.061292 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4lndt\" (UniqueName: \"kubernetes.io/projected/c0a4e828-1f06-4be9-880c-eea97ff9ef11-kube-api-access-4lndt\") pod \"dnsmasq-dns-85ff748b95-scqz9\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") " pod="openstack/dnsmasq-dns-85ff748b95-scqz9"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.069906 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-57d7c7f95-fngdp"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.097068 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-scqz9"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.141605 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2c6d5132-9798-418e-92c7-bb0b50b3fd47-config-data-custom\") pod \"barbican-api-655b7696db-jp55x\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " pod="openstack/barbican-api-655b7696db-jp55x"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.142086 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c6d5132-9798-418e-92c7-bb0b50b3fd47-logs\") pod \"barbican-api-655b7696db-jp55x\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " pod="openstack/barbican-api-655b7696db-jp55x"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.142289 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z64wn\" (UniqueName: \"kubernetes.io/projected/2c6d5132-9798-418e-92c7-bb0b50b3fd47-kube-api-access-z64wn\") pod \"barbican-api-655b7696db-jp55x\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " pod="openstack/barbican-api-655b7696db-jp55x"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.142396 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c6d5132-9798-418e-92c7-bb0b50b3fd47-config-data\") pod \"barbican-api-655b7696db-jp55x\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " pod="openstack/barbican-api-655b7696db-jp55x"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.142540 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c6d5132-9798-418e-92c7-bb0b50b3fd47-combined-ca-bundle\") pod \"barbican-api-655b7696db-jp55x\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " pod="openstack/barbican-api-655b7696db-jp55x"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.181840 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac2f050c-f8d0-4ae5-9205-17e00f283e4b" path="/var/lib/kubelet/pods/ac2f050c-f8d0-4ae5-9205-17e00f283e4b/volumes"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.245307 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2c6d5132-9798-418e-92c7-bb0b50b3fd47-config-data-custom\") pod \"barbican-api-655b7696db-jp55x\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " pod="openstack/barbican-api-655b7696db-jp55x"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.245792 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c6d5132-9798-418e-92c7-bb0b50b3fd47-logs\") pod \"barbican-api-655b7696db-jp55x\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " pod="openstack/barbican-api-655b7696db-jp55x"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.245888 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z64wn\" (UniqueName: \"kubernetes.io/projected/2c6d5132-9798-418e-92c7-bb0b50b3fd47-kube-api-access-z64wn\") pod \"barbican-api-655b7696db-jp55x\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " pod="openstack/barbican-api-655b7696db-jp55x"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.245932 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c6d5132-9798-418e-92c7-bb0b50b3fd47-config-data\") pod \"barbican-api-655b7696db-jp55x\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " pod="openstack/barbican-api-655b7696db-jp55x"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.246003 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c6d5132-9798-418e-92c7-bb0b50b3fd47-combined-ca-bundle\") pod \"barbican-api-655b7696db-jp55x\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " pod="openstack/barbican-api-655b7696db-jp55x"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.251261 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c6d5132-9798-418e-92c7-bb0b50b3fd47-logs\") pod \"barbican-api-655b7696db-jp55x\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " pod="openstack/barbican-api-655b7696db-jp55x"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.256848 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2c6d5132-9798-418e-92c7-bb0b50b3fd47-config-data-custom\") pod \"barbican-api-655b7696db-jp55x\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " pod="openstack/barbican-api-655b7696db-jp55x"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.263308 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c6d5132-9798-418e-92c7-bb0b50b3fd47-config-data\") pod \"barbican-api-655b7696db-jp55x\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " pod="openstack/barbican-api-655b7696db-jp55x"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.264167 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c6d5132-9798-418e-92c7-bb0b50b3fd47-combined-ca-bundle\") pod \"barbican-api-655b7696db-jp55x\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " pod="openstack/barbican-api-655b7696db-jp55x"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.301049 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z64wn\" (UniqueName: \"kubernetes.io/projected/2c6d5132-9798-418e-92c7-bb0b50b3fd47-kube-api-access-z64wn\") pod \"barbican-api-655b7696db-jp55x\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " pod="openstack/barbican-api-655b7696db-jp55x"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.457036 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-655b7696db-jp55x"
Jan 30 12:21:01 crc kubenswrapper[4703]: I0130 12:21:01.799557 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-97468ff8-5bxdm"]
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.162248 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-scrgs"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.181666 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-57d7c7f95-fngdp"]
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.307138 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" event={"ID":"58305133-2318-4520-a3cc-bca7a1d61895","Type":"ContainerStarted","Data":"c3f8b6336d3d2a0a9289060ad984d052d320bbbcafa92fd37a5519c026f5e2d5"}
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.312277 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-57d7c7f95-fngdp" event={"ID":"f8d5f833-0ceb-4dc6-bb87-ce670386ef8b","Type":"ContainerStarted","Data":"264f37bed0b98c48a6dff73869c7328964f13c4bb6efb540b8b35d1402cd8189"}
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.339159 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1665113c-fcaa-4a13-9de2-552579864e44-etc-machine-id\") pod \"1665113c-fcaa-4a13-9de2-552579864e44\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") "
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.339332 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-combined-ca-bundle\") pod \"1665113c-fcaa-4a13-9de2-552579864e44\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") "
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.339399 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1665113c-fcaa-4a13-9de2-552579864e44-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "1665113c-fcaa-4a13-9de2-552579864e44" (UID: "1665113c-fcaa-4a13-9de2-552579864e44"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.339440 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-db-sync-config-data\") pod \"1665113c-fcaa-4a13-9de2-552579864e44\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") "
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.339490 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4xclk\" (UniqueName: \"kubernetes.io/projected/1665113c-fcaa-4a13-9de2-552579864e44-kube-api-access-4xclk\") pod \"1665113c-fcaa-4a13-9de2-552579864e44\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") "
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.339617 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-scripts\") pod \"1665113c-fcaa-4a13-9de2-552579864e44\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") "
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.339792 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-config-data\") pod \"1665113c-fcaa-4a13-9de2-552579864e44\" (UID: \"1665113c-fcaa-4a13-9de2-552579864e44\") "
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.341475 4703 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1665113c-fcaa-4a13-9de2-552579864e44-etc-machine-id\") on node \"crc\" DevicePath \"\""
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.341936 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-scrgs"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.342366 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-scrgs" event={"ID":"1665113c-fcaa-4a13-9de2-552579864e44","Type":"ContainerDied","Data":"91a4c6344336e8c730ae40725c1fd99e5bd6410bb1a0b83bfb28844435442efc"}
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.342405 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="91a4c6344336e8c730ae40725c1fd99e5bd6410bb1a0b83bfb28844435442efc"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.355451 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-scripts" (OuterVolumeSpecName: "scripts") pod "1665113c-fcaa-4a13-9de2-552579864e44" (UID: "1665113c-fcaa-4a13-9de2-552579864e44"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.368001 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-655b7696db-jp55x"]
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.370690 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "1665113c-fcaa-4a13-9de2-552579864e44" (UID: "1665113c-fcaa-4a13-9de2-552579864e44"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.370904 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1665113c-fcaa-4a13-9de2-552579864e44-kube-api-access-4xclk" (OuterVolumeSpecName: "kube-api-access-4xclk") pod "1665113c-fcaa-4a13-9de2-552579864e44" (UID: "1665113c-fcaa-4a13-9de2-552579864e44"). InnerVolumeSpecName "kube-api-access-4xclk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.405987 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-scqz9"]
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.447370 4703 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.447432 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4xclk\" (UniqueName: \"kubernetes.io/projected/1665113c-fcaa-4a13-9de2-552579864e44-kube-api-access-4xclk\") on node \"crc\" DevicePath \"\""
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.447451 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-scripts\") on node \"crc\" DevicePath \"\""
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.467371 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1665113c-fcaa-4a13-9de2-552579864e44" (UID: "1665113c-fcaa-4a13-9de2-552579864e44"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.511371 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-config-data" (OuterVolumeSpecName: "config-data") pod "1665113c-fcaa-4a13-9de2-552579864e44" (UID: "1665113c-fcaa-4a13-9de2-552579864e44"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.549857 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.549927 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1665113c-fcaa-4a13-9de2-552579864e44-config-data\") on node \"crc\" DevicePath \"\""
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.637422 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 30 12:21:02 crc kubenswrapper[4703]: E0130 12:21:02.639394 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1665113c-fcaa-4a13-9de2-552579864e44" containerName="cinder-db-sync"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.639427 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="1665113c-fcaa-4a13-9de2-552579864e44" containerName="cinder-db-sync"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.639829 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="1665113c-fcaa-4a13-9de2-552579864e44" containerName="cinder-db-sync"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.644082 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.665171 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.665391 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-config-data\") pod \"cinder-scheduler-0\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.665482 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.665876 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2brxx\" (UniqueName: \"kubernetes.io/projected/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-kube-api-access-2brxx\") pod \"cinder-scheduler-0\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.665938 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-scripts\") pod \"cinder-scheduler-0\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.666084 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.667860 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.671333 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.768142 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2brxx\" (UniqueName: \"kubernetes.io/projected/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-kube-api-access-2brxx\") pod \"cinder-scheduler-0\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.768206 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-scripts\") pod \"cinder-scheduler-0\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.768274 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.768342 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.768389 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-config-data\") pod \"cinder-scheduler-0\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.768427 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.774813 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.777725 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.791464 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-scripts\") pod \"cinder-scheduler-0\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.798885 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.811651 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-config-data\") pod \"cinder-scheduler-0\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.817008 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2brxx\" (UniqueName: \"kubernetes.io/projected/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-kube-api-access-2brxx\") pod \"cinder-scheduler-0\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " pod="openstack/cinder-scheduler-0"
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.946909 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-scqz9"]
Jan 30 12:21:02 crc kubenswrapper[4703]: I0130 12:21:02.960871 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.087229 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-mfcxn"]
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.092628 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.168701 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-mfcxn"]
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.245862 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.342579 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.347712 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-mfcxn\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.347860 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-mfcxn\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.347952 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-config\") pod \"dnsmasq-dns-5c9776ccc5-mfcxn\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.347990 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-etc-machine-id\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.348013 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-config-data-custom\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.348054 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-config-data\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.348190 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.348224 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-logs\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.348255 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-mfcxn\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.348300 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4pq68\" (UniqueName: \"kubernetes.io/projected/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-kube-api-access-4pq68\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.348347 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-mfcxn\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.348384 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2nxj\" (UniqueName: \"kubernetes.io/projected/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-kube-api-access-t2nxj\") pod \"dnsmasq-dns-5c9776ccc5-mfcxn\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.348432 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-scripts\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.397252 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"]
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.397575 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-decision-engine-0" podUID="26df44bd-bd05-4ed3-b146-fa1111db982e" containerName="watcher-decision-engine" containerID="cri-o://ddfffd2731b492f8a028180da68fad74e9d83ce5f5552968d2ed9fc2d5b71b5d" gracePeriod=30
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.404037 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.407419 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-655b7696db-jp55x" event={"ID":"2c6d5132-9798-418e-92c7-bb0b50b3fd47","Type":"ContainerStarted","Data":"653cc576beb0aa27ee6d08fbeb0e44bca965d94c921c6662d2cce93ef06cfefe"}
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.407482 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-655b7696db-jp55x" event={"ID":"2c6d5132-9798-418e-92c7-bb0b50b3fd47","Type":"ContainerStarted","Data":"f2c4ddfb02f2a74fbd7114cddbce48dcf39c210a7a99466f076980e2882d73b5"}
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.415912 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-scqz9" event={"ID":"c0a4e828-1f06-4be9-880c-eea97ff9ef11","Type":"ContainerStarted","Data":"68658fecc2b5abcb92fd67322d6b31c6988f618aa8ab6ea3ca99ce8d58f63cae"}
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.438896 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.591180 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-mfcxn\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.591937 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4pq68\" (UniqueName: \"kubernetes.io/projected/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-kube-api-access-4pq68\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.592160 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-mfcxn\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.592384 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2nxj\" (UniqueName: \"kubernetes.io/projected/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-kube-api-access-t2nxj\") pod \"dnsmasq-dns-5c9776ccc5-mfcxn\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.592825 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-scripts\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.593907 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-mfcxn\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.594081 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-mfcxn\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.595244 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-ovsdbserver-nb\") pod \"dnsmasq-dns-5c9776ccc5-mfcxn\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.595743 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-ovsdbserver-sb\") pod \"dnsmasq-dns-5c9776ccc5-mfcxn\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.598530 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-dns-swift-storage-0\") pod \"dnsmasq-dns-5c9776ccc5-mfcxn\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.599654 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-config\") pod \"dnsmasq-dns-5c9776ccc5-mfcxn\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.599860 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-etc-machine-id\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.599900 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-config-data-custom\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.599935 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-config-data\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.600149 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.600233 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-logs\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.601091 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-logs\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.602446 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-dns-svc\") pod \"dnsmasq-dns-5c9776ccc5-mfcxn\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.617822 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-config\") pod \"dnsmasq-dns-5c9776ccc5-mfcxn\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.621383 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-config-data\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.623613 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-etc-machine-id\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.624792 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-scripts\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.633240 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.645033 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-config-data-custom\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.683730 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4pq68\" (UniqueName: \"kubernetes.io/projected/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-kube-api-access-4pq68\") pod \"cinder-api-0\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.695763 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2nxj\" (UniqueName: \"kubernetes.io/projected/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-kube-api-access-t2nxj\") pod \"dnsmasq-dns-5c9776ccc5-mfcxn\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.706305 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.761305 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:03 crc kubenswrapper[4703]: I0130 12:21:03.859600 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 30 12:21:04 crc kubenswrapper[4703]: I0130 12:21:04.496758 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c37c42a3-786f-45bf-a2e1-1fa57ebe162a","Type":"ContainerStarted","Data":"6905fd089a0ff764cf1265bf46d7da7264bc8b69942506c37c6058093e12d47d"}
Jan 30 12:21:04 crc kubenswrapper[4703]: I0130 12:21:04.541861 4703 generic.go:334] "Generic (PLEG): container finished" podID="c0a4e828-1f06-4be9-880c-eea97ff9ef11" containerID="ce3ee55018b3288d22b437391e15b4e9ca8b5e39a27fa02a3579d8a69cd500c4" exitCode=0
Jan 30 12:21:04 crc kubenswrapper[4703]: I0130 12:21:04.541936 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-scqz9" event={"ID":"c0a4e828-1f06-4be9-880c-eea97ff9ef11","Type":"ContainerDied","Data":"ce3ee55018b3288d22b437391e15b4e9ca8b5e39a27fa02a3579d8a69cd500c4"}
Jan 30 12:21:04 crc kubenswrapper[4703]: I0130 12:21:04.650804 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-mfcxn"]
Jan 30 12:21:04 crc kubenswrapper[4703]: W0130 12:21:04.741245 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c2c4fcd_7779_44aa_81a2_d544a730b5e9.slice/crio-60b300be6a4ae9b2ec83187000942fd08ef37e22971a21c928d4237b093a5874 WatchSource:0}: Error finding container 60b300be6a4ae9b2ec83187000942fd08ef37e22971a21c928d4237b093a5874: Status 404 returned error can't find the container with id 60b300be6a4ae9b2ec83187000942fd08ef37e22971a21c928d4237b093a5874
Jan 30 12:21:04 crc kubenswrapper[4703]: I0130 12:21:04.918318 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Jan 30 12:21:04 crc kubenswrapper[4703]: W0130 12:21:04.983383 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe1448d1_2faa_4d2d_9d5b_f6bfb2f08c3f.slice/crio-f542f0fa5b44d1682635605bd2955fc5eb597decb60e6b654679319b286f2666 WatchSource:0}: Error finding container f542f0fa5b44d1682635605bd2955fc5eb597decb60e6b654679319b286f2666: Status 404 returned error can't find the container with id f542f0fa5b44d1682635605bd2955fc5eb597decb60e6b654679319b286f2666
Jan 30 12:21:05 crc kubenswrapper[4703]: I0130 12:21:05.256166 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rs9pr"
Jan 30 12:21:05 crc kubenswrapper[4703]: I0130 12:21:05.256731 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rs9pr"
Jan 30 12:21:05 crc kubenswrapper[4703]: I0130 12:21:05.577415 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f","Type":"ContainerStarted","Data":"f542f0fa5b44d1682635605bd2955fc5eb597decb60e6b654679319b286f2666"}
Jan 30 12:21:05 crc kubenswrapper[4703]: I0130 12:21:05.582435 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-655b7696db-jp55x" event={"ID":"2c6d5132-9798-418e-92c7-bb0b50b3fd47","Type":"ContainerStarted","Data":"655e99fe1eacd608561bffbe785ec2376d1022f007050665de9607175d526fce"}
Jan 30 12:21:05 crc kubenswrapper[4703]: I0130 12:21:05.584441 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-655b7696db-jp55x"
Jan 30 12:21:05 crc kubenswrapper[4703]: I0130 12:21:05.584478 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-655b7696db-jp55x"
Jan 30 12:21:05 crc kubenswrapper[4703]: I0130 12:21:05.587567 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn" event={"ID":"2c2c4fcd-7779-44aa-81a2-d544a730b5e9","Type":"ContainerStarted","Data":"60b300be6a4ae9b2ec83187000942fd08ef37e22971a21c928d4237b093a5874"}
Jan 30 12:21:05 crc kubenswrapper[4703]: I0130 12:21:05.635457 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-655b7696db-jp55x" podStartSLOduration=5.63542568 podStartE2EDuration="5.63542568s" podCreationTimestamp="2026-01-30 12:21:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:21:05.610164494 +0000 UTC m=+1501.387986148" watchObservedRunningTime="2026-01-30 12:21:05.63542568 +0000 UTC m=+1501.413247334"
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.025810 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-scqz9"
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.127244 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-config\") pod \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") "
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.127848 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4lndt\" (UniqueName: \"kubernetes.io/projected/c0a4e828-1f06-4be9-880c-eea97ff9ef11-kube-api-access-4lndt\") pod \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") "
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.127894 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-dns-swift-storage-0\") pod \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") "
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.128037 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-ovsdbserver-nb\") pod \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") "
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.128270 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-ovsdbserver-sb\") pod \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") "
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.128314 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-dns-svc\") pod \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\" (UID: \"c0a4e828-1f06-4be9-880c-eea97ff9ef11\") "
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.169054 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c0a4e828-1f06-4be9-880c-eea97ff9ef11" (UID: "c0a4e828-1f06-4be9-880c-eea97ff9ef11"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.181173 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c0a4e828-1f06-4be9-880c-eea97ff9ef11" (UID: "c0a4e828-1f06-4be9-880c-eea97ff9ef11"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.191040 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-config" (OuterVolumeSpecName: "config") pod "c0a4e828-1f06-4be9-880c-eea97ff9ef11" (UID: "c0a4e828-1f06-4be9-880c-eea97ff9ef11"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.203644 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c0a4e828-1f06-4be9-880c-eea97ff9ef11" (UID: "c0a4e828-1f06-4be9-880c-eea97ff9ef11"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.213468 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0a4e828-1f06-4be9-880c-eea97ff9ef11-kube-api-access-4lndt" (OuterVolumeSpecName: "kube-api-access-4lndt") pod "c0a4e828-1f06-4be9-880c-eea97ff9ef11" (UID: "c0a4e828-1f06-4be9-880c-eea97ff9ef11"). InnerVolumeSpecName "kube-api-access-4lndt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.231583 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.233606 4703 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.233716 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-config\") on node \"crc\" DevicePath \"\""
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.233804 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4lndt\" (UniqueName: \"kubernetes.io/projected/c0a4e828-1f06-4be9-880c-eea97ff9ef11-kube-api-access-4lndt\") on node \"crc\" DevicePath \"\""
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.233936 4703 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.235494 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c0a4e828-1f06-4be9-880c-eea97ff9ef11" (UID: "c0a4e828-1f06-4be9-880c-eea97ff9ef11"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.303139 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rs9pr" podUID="7d3ec3ba-8eec-4f98-8948-6cf24d5638b7" containerName="registry-server" probeResult="failure" output=<
Jan 30 12:21:06 crc kubenswrapper[4703]: timeout: failed to connect service ":50051" within 1s
Jan 30 12:21:06 crc kubenswrapper[4703]: >
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.337317 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c0a4e828-1f06-4be9-880c-eea97ff9ef11-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.673703 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-85ff748b95-scqz9" event={"ID":"c0a4e828-1f06-4be9-880c-eea97ff9ef11","Type":"ContainerDied","Data":"68658fecc2b5abcb92fd67322d6b31c6988f618aa8ab6ea3ca99ce8d58f63cae"}
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.674422 4703 scope.go:117] "RemoveContainer" containerID="ce3ee55018b3288d22b437391e15b4e9ca8b5e39a27fa02a3579d8a69cd500c4"
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.674727 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-85ff748b95-scqz9"
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.682685 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.715395 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f","Type":"ContainerStarted","Data":"882c8d3f52bc214e09cae7afc718096bfcafe9fc070703dab4d7ee410e6167f4"}
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.720773 4703 generic.go:334] "Generic (PLEG): container finished" podID="2c2c4fcd-7779-44aa-81a2-d544a730b5e9" containerID="11af69b92cae706c2908484b0a0c3b489b6f1f95bd8dd0a6006f861739282196" exitCode=0
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.722677 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn" event={"ID":"2c2c4fcd-7779-44aa-81a2-d544a730b5e9","Type":"ContainerDied","Data":"11af69b92cae706c2908484b0a0c3b489b6f1f95bd8dd0a6006f861739282196"}
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.850228 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-scqz9"]
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.873439 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-85ff748b95-scqz9"]
Jan 30 12:21:06 crc kubenswrapper[4703]: I0130 12:21:06.897899 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-7946f9d8b5-42hg8"
Jan 30 12:21:07 crc kubenswrapper[4703]: I0130 12:21:07.140455 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0a4e828-1f06-4be9-880c-eea97ff9ef11" path="/var/lib/kubelet/pods/c0a4e828-1f06-4be9-880c-eea97ff9ef11/volumes"
Jan 30 12:21:07 crc kubenswrapper[4703]: I0130 12:21:07.735352 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c37c42a3-786f-45bf-a2e1-1fa57ebe162a","Type":"ContainerStarted","Data":"a3031845f0de92d153609ad1fbd8a01f948f39897de754e443aa2a8189672435"}
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.746227 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-78d958758-q982d"]
Jan 30 12:21:08 crc kubenswrapper[4703]: E0130 12:21:08.749372 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0a4e828-1f06-4be9-880c-eea97ff9ef11" containerName="init"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.749523 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0a4e828-1f06-4be9-880c-eea97ff9ef11" containerName="init"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.749925 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0a4e828-1f06-4be9-880c-eea97ff9ef11" containerName="init"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.751748 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.756183 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.761599 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.785274 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-78d958758-q982d"]
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.810099 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zl4pm\" (UniqueName: \"kubernetes.io/projected/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-kube-api-access-zl4pm\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.810251 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-internal-tls-certs\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.810289 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-logs\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.810309 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-config-data\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.810506 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-config-data-custom\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.810571 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-public-tls-certs\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.810667 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-combined-ca-bundle\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.912677 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-config-data-custom\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.912752 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-public-tls-certs\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.912818 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-combined-ca-bundle\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.912933 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zl4pm\" (UniqueName: \"kubernetes.io/projected/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-kube-api-access-zl4pm\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.912971 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-internal-tls-certs\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.913000 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-logs\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.913019 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-config-data\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.921887 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-config-data\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.930327 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-internal-tls-certs\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.930525 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-config-data-custom\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.930727 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-logs\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.937799 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-public-tls-certs\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.944585 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-combined-ca-bundle\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:08 crc kubenswrapper[4703]: I0130 12:21:08.949091 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zl4pm\" (UniqueName: \"kubernetes.io/projected/0f2e50f0-ca1d-4b8f-b284-cbf7069e1279-kube-api-access-zl4pm\") pod \"barbican-api-78d958758-q982d\" (UID: \"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279\") " pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:09 crc kubenswrapper[4703]: I0130 12:21:09.116379 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-78d958758-q982d"
Jan 30 12:21:10 crc kubenswrapper[4703]: I0130 12:21:10.503765 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5f9958979d-8h859" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.159:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.159:8443: connect: connection refused"
Jan 30 12:21:10 crc kubenswrapper[4703]: I0130 12:21:10.885702 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-77fb4cf9b8-pw692" podUID="9c6d3262-7469-45ac-b5c8-9eb0f9456a5a" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.160:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.160:8443: connect: connection refused"
Jan 30 12:21:11 crc kubenswrapper[4703]: I0130 12:21:11.140718 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-78d958758-q982d"]
Jan 30 12:21:11 crc kubenswrapper[4703]: I0130 12:21:11.863591 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"]
Jan 30 12:21:11 crc kubenswrapper[4703]: I0130 12:21:11.871486 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 30 12:21:11 crc kubenswrapper[4703]: I0130 12:21:11.880622 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config"
Jan 30 12:21:11 crc kubenswrapper[4703]: I0130 12:21:11.881377 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret"
Jan 30 12:21:11 crc kubenswrapper[4703]: I0130 12:21:11.881677 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-f68hd"
Jan 30 12:21:11 crc kubenswrapper[4703]: I0130 12:21:11.911915 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Jan 30 12:21:11 crc kubenswrapper[4703]: I0130 12:21:11.914456 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64e4eca7-b75f-4ac6-ba29-f017d5aec24e-combined-ca-bundle\") pod \"openstackclient\" (UID: \"64e4eca7-b75f-4ac6-ba29-f017d5aec24e\") " pod="openstack/openstackclient"
Jan 30 12:21:11 crc kubenswrapper[4703]: I0130 12:21:11.914582 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/64e4eca7-b75f-4ac6-ba29-f017d5aec24e-openstack-config-secret\") pod \"openstackclient\" (UID: \"64e4eca7-b75f-4ac6-ba29-f017d5aec24e\") " pod="openstack/openstackclient"
Jan 30 12:21:11 crc kubenswrapper[4703]: I0130 12:21:11.914649 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/64e4eca7-b75f-4ac6-ba29-f017d5aec24e-openstack-config\") pod \"openstackclient\" (UID: \"64e4eca7-b75f-4ac6-ba29-f017d5aec24e\") " pod="openstack/openstackclient"
Jan 30 12:21:11 crc kubenswrapper[4703]: I0130 12:21:11.914721 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4njm4\" (UniqueName: \"kubernetes.io/projected/64e4eca7-b75f-4ac6-ba29-f017d5aec24e-kube-api-access-4njm4\") pod \"openstackclient\" (UID: \"64e4eca7-b75f-4ac6-ba29-f017d5aec24e\") " pod="openstack/openstackclient"
Jan 30 12:21:11 crc kubenswrapper[4703]: I0130 12:21:11.925080 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn" event={"ID":"2c2c4fcd-7779-44aa-81a2-d544a730b5e9","Type":"ContainerStarted","Data":"ed78a9b7c581e15e5ea81353b6241ff6a032d90d74b485e73a2ff1778617ba01"}
Jan 30 12:21:11 crc kubenswrapper[4703]: I0130 12:21:11.925505 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn"
Jan 30 12:21:11 crc kubenswrapper[4703]: I0130 12:21:11.956821 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn" podStartSLOduration=8.956789527 podStartE2EDuration="8.956789527s" podCreationTimestamp="2026-01-30 12:21:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:21:11.953600774 +0000 UTC m=+1507.731422428" watchObservedRunningTime="2026-01-30 12:21:11.956789527 +0000 UTC m=+1507.734611181"
Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.017101 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName:
\"kubernetes.io/configmap/64e4eca7-b75f-4ac6-ba29-f017d5aec24e-openstack-config\") pod \"openstackclient\" (UID: \"64e4eca7-b75f-4ac6-ba29-f017d5aec24e\") " pod="openstack/openstackclient" Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.017203 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4njm4\" (UniqueName: \"kubernetes.io/projected/64e4eca7-b75f-4ac6-ba29-f017d5aec24e-kube-api-access-4njm4\") pod \"openstackclient\" (UID: \"64e4eca7-b75f-4ac6-ba29-f017d5aec24e\") " pod="openstack/openstackclient" Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.017360 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64e4eca7-b75f-4ac6-ba29-f017d5aec24e-combined-ca-bundle\") pod \"openstackclient\" (UID: \"64e4eca7-b75f-4ac6-ba29-f017d5aec24e\") " pod="openstack/openstackclient" Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.017412 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/64e4eca7-b75f-4ac6-ba29-f017d5aec24e-openstack-config-secret\") pod \"openstackclient\" (UID: \"64e4eca7-b75f-4ac6-ba29-f017d5aec24e\") " pod="openstack/openstackclient" Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.019721 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/64e4eca7-b75f-4ac6-ba29-f017d5aec24e-openstack-config\") pod \"openstackclient\" (UID: \"64e4eca7-b75f-4ac6-ba29-f017d5aec24e\") " pod="openstack/openstackclient" Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.025509 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/64e4eca7-b75f-4ac6-ba29-f017d5aec24e-openstack-config-secret\") pod \"openstackclient\" (UID: \"64e4eca7-b75f-4ac6-ba29-f017d5aec24e\") " pod="openstack/openstackclient" Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.029058 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64e4eca7-b75f-4ac6-ba29-f017d5aec24e-combined-ca-bundle\") pod \"openstackclient\" (UID: \"64e4eca7-b75f-4ac6-ba29-f017d5aec24e\") " pod="openstack/openstackclient" Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.043940 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4njm4\" (UniqueName: \"kubernetes.io/projected/64e4eca7-b75f-4ac6-ba29-f017d5aec24e-kube-api-access-4njm4\") pod \"openstackclient\" (UID: \"64e4eca7-b75f-4ac6-ba29-f017d5aec24e\") " pod="openstack/openstackclient" Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.238353 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.823075 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.823183 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.823255 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.824325 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ae09ea0f762d55711d4ebe52875d9283498e826f1ea02651fb958e545587bc81"} pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.824392 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" containerID="cri-o://ae09ea0f762d55711d4ebe52875d9283498e826f1ea02651fb958e545587bc81" gracePeriod=600 Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.953831 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-78d958758-q982d" event={"ID":"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279","Type":"ContainerStarted","Data":"5bebd54ca360b71f6fa5d658e4786f3284719d792daa87d2b66a866a235b1539"} Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.961930 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f" containerName="cinder-api-log" containerID="cri-o://882c8d3f52bc214e09cae7afc718096bfcafe9fc070703dab4d7ee410e6167f4" gracePeriod=30 Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.962020 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f","Type":"ContainerStarted","Data":"dc8e2cf0b706f06a41d1755c2864253454dde003bef1c8218ab2edc6f4126207"} Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.962233 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f" containerName="cinder-api" containerID="cri-o://dc8e2cf0b706f06a41d1755c2864253454dde003bef1c8218ab2edc6f4126207" gracePeriod=30 Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.963109 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 30 12:21:12 crc kubenswrapper[4703]: I0130 12:21:12.975311 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"c37c42a3-786f-45bf-a2e1-1fa57ebe162a","Type":"ContainerStarted","Data":"9d6aec883a6c98ecb2815ec50810b4d1f4f8752ea5e25d2b268fccf7e8d52e89"} Jan 30 12:21:13 crc kubenswrapper[4703]: I0130 12:21:13.043669 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=10.043618319 podStartE2EDuration="10.043618319s" podCreationTimestamp="2026-01-30 12:21:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:21:13.002558073 +0000 UTC m=+1508.780379747" watchObservedRunningTime="2026-01-30 12:21:13.043618319 +0000 UTC m=+1508.821439973" Jan 30 12:21:13 crc kubenswrapper[4703]: I0130 12:21:13.055200 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=8.803675053 podStartE2EDuration="11.055171049s" podCreationTimestamp="2026-01-30 12:21:02 +0000 UTC" firstStartedPulling="2026-01-30 12:21:03.952908764 +0000 UTC m=+1499.730730418" lastFinishedPulling="2026-01-30 12:21:06.20440476 +0000 UTC m=+1501.982226414" observedRunningTime="2026-01-30 12:21:13.036605197 +0000 UTC m=+1508.814426851" watchObservedRunningTime="2026-01-30 12:21:13.055171049 +0000 UTC m=+1508.832992693" Jan 30 12:21:13 crc kubenswrapper[4703]: I0130 12:21:13.644415 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 30 12:21:13 crc kubenswrapper[4703]: W0130 12:21:13.792922 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod64e4eca7_b75f_4ac6_ba29_f017d5aec24e.slice/crio-f048bbe6010a94c27797bb3761da088b289beb59fc50a1ed3a1dd6658185c0f5 WatchSource:0}: Error finding container f048bbe6010a94c27797bb3761da088b289beb59fc50a1ed3a1dd6658185c0f5: Status 404 returned error can't find the container with id f048bbe6010a94c27797bb3761da088b289beb59fc50a1ed3a1dd6658185c0f5 Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.031046 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"64e4eca7-b75f-4ac6-ba29-f017d5aec24e","Type":"ContainerStarted","Data":"f048bbe6010a94c27797bb3761da088b289beb59fc50a1ed3a1dd6658185c0f5"} Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.070515 4703 generic.go:334] "Generic (PLEG): container finished" podID="26df44bd-bd05-4ed3-b146-fa1111db982e" containerID="ddfffd2731b492f8a028180da68fad74e9d83ce5f5552968d2ed9fc2d5b71b5d" exitCode=0 Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.070622 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"26df44bd-bd05-4ed3-b146-fa1111db982e","Type":"ContainerDied","Data":"ddfffd2731b492f8a028180da68fad74e9d83ce5f5552968d2ed9fc2d5b71b5d"} Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.111502 4703 generic.go:334] "Generic (PLEG): container finished" podID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerID="ae09ea0f762d55711d4ebe52875d9283498e826f1ea02651fb958e545587bc81" exitCode=0 Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.111671 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerDied","Data":"ae09ea0f762d55711d4ebe52875d9283498e826f1ea02651fb958e545587bc81"} Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.111727 4703 
scope.go:117] "RemoveContainer" containerID="cc0f01995e25e263737b7a37c11076b9211642f05e9b4b225e1ac40c3094db02" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.130134 4703 generic.go:334] "Generic (PLEG): container finished" podID="be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f" containerID="dc8e2cf0b706f06a41d1755c2864253454dde003bef1c8218ab2edc6f4126207" exitCode=0 Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.130560 4703 generic.go:334] "Generic (PLEG): container finished" podID="be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f" containerID="882c8d3f52bc214e09cae7afc718096bfcafe9fc070703dab4d7ee410e6167f4" exitCode=143 Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.130645 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f","Type":"ContainerDied","Data":"dc8e2cf0b706f06a41d1755c2864253454dde003bef1c8218ab2edc6f4126207"} Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.130703 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f","Type":"ContainerDied","Data":"882c8d3f52bc214e09cae7afc718096bfcafe9fc070703dab4d7ee410e6167f4"} Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.142980 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" event={"ID":"58305133-2318-4520-a3cc-bca7a1d61895","Type":"ContainerStarted","Data":"3e0f432a723d419772cfc24556e32c3a51d89a866e086c01fcb72d2c9e8356c0"} Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.163254 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-78d958758-q982d" event={"ID":"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279","Type":"ContainerStarted","Data":"a897944cc2146896cae4758f8917ae78364f7f936da6c1d59f19557176913b45"} Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.381493 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.459244 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.507257 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-etc-machine-id\") pod \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.507882 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4pq68\" (UniqueName: \"kubernetes.io/projected/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-kube-api-access-4pq68\") pod \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.507934 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26df44bd-bd05-4ed3-b146-fa1111db982e-config-data\") pod \"26df44bd-bd05-4ed3-b146-fa1111db982e\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.507988 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-combined-ca-bundle\") pod \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.508017 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-logs\") pod \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.508051 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-config-data\") pod \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.508178 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-config-data-custom\") pod \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.508315 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-scripts\") pod \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\" (UID: \"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f\") " Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.508281 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f" (UID: "be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.508363 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/26df44bd-bd05-4ed3-b146-fa1111db982e-custom-prometheus-ca\") pod \"26df44bd-bd05-4ed3-b146-fa1111db982e\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.508457 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26df44bd-bd05-4ed3-b146-fa1111db982e-combined-ca-bundle\") pod \"26df44bd-bd05-4ed3-b146-fa1111db982e\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.508504 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-76d9l\" (UniqueName: \"kubernetes.io/projected/26df44bd-bd05-4ed3-b146-fa1111db982e-kube-api-access-76d9l\") pod \"26df44bd-bd05-4ed3-b146-fa1111db982e\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.508558 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26df44bd-bd05-4ed3-b146-fa1111db982e-logs\") pod \"26df44bd-bd05-4ed3-b146-fa1111db982e\" (UID: \"26df44bd-bd05-4ed3-b146-fa1111db982e\") " Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.509470 4703 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.510325 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26df44bd-bd05-4ed3-b146-fa1111db982e-logs" (OuterVolumeSpecName: "logs") pod "26df44bd-bd05-4ed3-b146-fa1111db982e" (UID: "26df44bd-bd05-4ed3-b146-fa1111db982e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.514451 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-logs" (OuterVolumeSpecName: "logs") pod "be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f" (UID: "be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.536215 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26df44bd-bd05-4ed3-b146-fa1111db982e-kube-api-access-76d9l" (OuterVolumeSpecName: "kube-api-access-76d9l") pod "26df44bd-bd05-4ed3-b146-fa1111db982e" (UID: "26df44bd-bd05-4ed3-b146-fa1111db982e"). InnerVolumeSpecName "kube-api-access-76d9l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.537071 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-scripts" (OuterVolumeSpecName: "scripts") pod "be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f" (UID: "be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.538341 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f" (UID: "be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.541798 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-kube-api-access-4pq68" (OuterVolumeSpecName: "kube-api-access-4pq68") pod "be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f" (UID: "be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f"). InnerVolumeSpecName "kube-api-access-4pq68". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.612385 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-logs\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.612513 4703 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.612536 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.612545 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-76d9l\" (UniqueName: \"kubernetes.io/projected/26df44bd-bd05-4ed3-b146-fa1111db982e-kube-api-access-76d9l\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.612645 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26df44bd-bd05-4ed3-b146-fa1111db982e-logs\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.612655 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4pq68\" (UniqueName: \"kubernetes.io/projected/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-kube-api-access-4pq68\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.879199 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26df44bd-bd05-4ed3-b146-fa1111db982e-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "26df44bd-bd05-4ed3-b146-fa1111db982e" (UID: "26df44bd-bd05-4ed3-b146-fa1111db982e"). InnerVolumeSpecName "custom-prometheus-ca". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.927336 4703 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/26df44bd-bd05-4ed3-b146-fa1111db982e-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.991858 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f" (UID: "be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:14 crc kubenswrapper[4703]: I0130 12:21:14.992504 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26df44bd-bd05-4ed3-b146-fa1111db982e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "26df44bd-bd05-4ed3-b146-fa1111db982e" (UID: "26df44bd-bd05-4ed3-b146-fa1111db982e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.025367 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26df44bd-bd05-4ed3-b146-fa1111db982e-config-data" (OuterVolumeSpecName: "config-data") pod "26df44bd-bd05-4ed3-b146-fa1111db982e" (UID: "26df44bd-bd05-4ed3-b146-fa1111db982e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.049738 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/26df44bd-bd05-4ed3-b146-fa1111db982e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.049780 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/26df44bd-bd05-4ed3-b146-fa1111db982e-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.049791 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.234888 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.234945 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f","Type":"ContainerDied","Data":"f542f0fa5b44d1682635605bd2955fc5eb597decb60e6b654679319b286f2666"} Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.235013 4703 scope.go:117] "RemoveContainer" containerID="dc8e2cf0b706f06a41d1755c2864253454dde003bef1c8218ab2edc6f4126207" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.248844 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" event={"ID":"58305133-2318-4520-a3cc-bca7a1d61895","Type":"ContainerStarted","Data":"e4ff2175d93a687c42510c27ff0bb4c2671b640315f31b8c2dd867fb824284e1"} Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.277438 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-57d7c7f95-fngdp" event={"ID":"f8d5f833-0ceb-4dc6-bb87-ce670386ef8b","Type":"ContainerStarted","Data":"53b466b87ac41b639fccc58a01ec4e044e06b6f2dcfc0ff60a15c98d75ffbe7e"} Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.301381 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-config-data" (OuterVolumeSpecName: "config-data") pod "be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f" (UID: "be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.301961 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-78d958758-q982d" event={"ID":"0f2e50f0-ca1d-4b8f-b284-cbf7069e1279","Type":"ContainerStarted","Data":"cfec0ef7882373e9d85f53467f52e6facf98a571fe9b7384033cb2d5e1eddfec"} Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.302036 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-78d958758-q982d" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.302107 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-78d958758-q982d" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.327888 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"26df44bd-bd05-4ed3-b146-fa1111db982e","Type":"ContainerDied","Data":"c6483ee91692fbc07589167785b8ec10c441b37a93cbade02ab25644abdfad91"} Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.328088 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.330350 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rs9pr" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.365107 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-97468ff8-5bxdm" podStartSLOduration=4.245735982 podStartE2EDuration="15.365074913s" podCreationTimestamp="2026-01-30 12:21:00 +0000 UTC" firstStartedPulling="2026-01-30 12:21:01.816920109 +0000 UTC m=+1497.594741763" lastFinishedPulling="2026-01-30 12:21:12.93625904 +0000 UTC m=+1508.714080694" observedRunningTime="2026-01-30 12:21:15.31260546 +0000 UTC m=+1511.090427134" watchObservedRunningTime="2026-01-30 12:21:15.365074913 +0000 UTC m=+1511.142896567" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.394860 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-78d958758-q982d" podStartSLOduration=7.394827165 podStartE2EDuration="7.394827165s" podCreationTimestamp="2026-01-30 12:21:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:21:15.35188912 +0000 UTC m=+1511.129710774" watchObservedRunningTime="2026-01-30 12:21:15.394827165 +0000 UTC m=+1511.172648849" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.402410 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.405291 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerStarted","Data":"3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127"} Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.465458 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rs9pr" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.485797 4703 scope.go:117] "RemoveContainer" containerID="882c8d3f52bc214e09cae7afc718096bfcafe9fc070703dab4d7ee410e6167f4" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.534948 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.548589 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.550698 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-655b7696db-jp55x" podUID="2c6d5132-9798-418e-92c7-bb0b50b3fd47" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.178:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.552246 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-655b7696db-jp55x" podUID="2c6d5132-9798-418e-92c7-bb0b50b3fd47" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.178:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 30 12:21:15 
crc kubenswrapper[4703]: I0130 12:21:15.552581 4703 scope.go:117] "RemoveContainer" containerID="ddfffd2731b492f8a028180da68fad74e9d83ce5f5552968d2ed9fc2d5b71b5d" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.575226 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 30 12:21:15 crc kubenswrapper[4703]: E0130 12:21:15.575973 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f" containerName="cinder-api-log" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.576004 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f" containerName="cinder-api-log" Jan 30 12:21:15 crc kubenswrapper[4703]: E0130 12:21:15.576026 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26df44bd-bd05-4ed3-b146-fa1111db982e" containerName="watcher-decision-engine" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.576035 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="26df44bd-bd05-4ed3-b146-fa1111db982e" containerName="watcher-decision-engine" Jan 30 12:21:15 crc kubenswrapper[4703]: E0130 12:21:15.576108 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f" containerName="cinder-api" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.576134 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f" containerName="cinder-api" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.576414 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f" containerName="cinder-api-log" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.576446 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f" containerName="cinder-api" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.576462 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="26df44bd-bd05-4ed3-b146-fa1111db982e" containerName="watcher-decision-engine" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.577602 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.583084 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.622675 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.682048 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.704810 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.723973 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3ccea95-1c2e-461a-9fc9-6b0171be170f-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.724360 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c3ccea95-1c2e-461a-9fc9-6b0171be170f-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.733740 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3ccea95-1c2e-461a-9fc9-6b0171be170f-logs\") pod \"watcher-decision-engine-0\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.734083 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3ccea95-1c2e-461a-9fc9-6b0171be170f-config-data\") pod \"watcher-decision-engine-0\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.749781 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jx4vn\" (UniqueName: \"kubernetes.io/projected/c3ccea95-1c2e-461a-9fc9-6b0171be170f-kube-api-access-jx4vn\") pod \"watcher-decision-engine-0\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.731320 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.752824 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.754009 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.777914 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.778304 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.778446 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.854481 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cbd06273-19f7-4051-aad3-5a9ea641cfc4-logs\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.854899 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3ccea95-1c2e-461a-9fc9-6b0171be170f-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.855001 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c3ccea95-1c2e-461a-9fc9-6b0171be170f-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.855101 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3ccea95-1c2e-461a-9fc9-6b0171be170f-logs\") pod \"watcher-decision-engine-0\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.855217 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbd06273-19f7-4051-aad3-5a9ea641cfc4-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.855325 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cbd06273-19f7-4051-aad3-5a9ea641cfc4-config-data-custom\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.855442 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3ccea95-1c2e-461a-9fc9-6b0171be170f-config-data\") pod \"watcher-decision-engine-0\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.855536 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbd06273-19f7-4051-aad3-5a9ea641cfc4-public-tls-certs\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " 
pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.855619 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbd06273-19f7-4051-aad3-5a9ea641cfc4-config-data\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.855731 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbd06273-19f7-4051-aad3-5a9ea641cfc4-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.855815 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sksp9\" (UniqueName: \"kubernetes.io/projected/cbd06273-19f7-4051-aad3-5a9ea641cfc4-kube-api-access-sksp9\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.855952 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cbd06273-19f7-4051-aad3-5a9ea641cfc4-scripts\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.856032 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cbd06273-19f7-4051-aad3-5a9ea641cfc4-etc-machine-id\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.856114 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jx4vn\" (UniqueName: \"kubernetes.io/projected/c3ccea95-1c2e-461a-9fc9-6b0171be170f-kube-api-access-jx4vn\") pod \"watcher-decision-engine-0\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.857156 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3ccea95-1c2e-461a-9fc9-6b0171be170f-logs\") pod \"watcher-decision-engine-0\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.866061 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3ccea95-1c2e-461a-9fc9-6b0171be170f-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.866061 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3ccea95-1c2e-461a-9fc9-6b0171be170f-config-data\") pod \"watcher-decision-engine-0\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.866814 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c3ccea95-1c2e-461a-9fc9-6b0171be170f-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.889944 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jx4vn\" (UniqueName: \"kubernetes.io/projected/c3ccea95-1c2e-461a-9fc9-6b0171be170f-kube-api-access-jx4vn\") pod \"watcher-decision-engine-0\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.954920 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.958615 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cbd06273-19f7-4051-aad3-5a9ea641cfc4-logs\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.958738 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbd06273-19f7-4051-aad3-5a9ea641cfc4-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.958788 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cbd06273-19f7-4051-aad3-5a9ea641cfc4-config-data-custom\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.958867 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbd06273-19f7-4051-aad3-5a9ea641cfc4-public-tls-certs\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.958912 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbd06273-19f7-4051-aad3-5a9ea641cfc4-config-data\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.958953 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbd06273-19f7-4051-aad3-5a9ea641cfc4-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.958973 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sksp9\" (UniqueName: \"kubernetes.io/projected/cbd06273-19f7-4051-aad3-5a9ea641cfc4-kube-api-access-sksp9\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.959042 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/cbd06273-19f7-4051-aad3-5a9ea641cfc4-scripts\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.959069 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cbd06273-19f7-4051-aad3-5a9ea641cfc4-etc-machine-id\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.959218 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/cbd06273-19f7-4051-aad3-5a9ea641cfc4-etc-machine-id\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.959660 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cbd06273-19f7-4051-aad3-5a9ea641cfc4-logs\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.962326 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rs9pr"] Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.970459 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cbd06273-19f7-4051-aad3-5a9ea641cfc4-scripts\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.973870 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbd06273-19f7-4051-aad3-5a9ea641cfc4-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.974209 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbd06273-19f7-4051-aad3-5a9ea641cfc4-public-tls-certs\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.974940 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbd06273-19f7-4051-aad3-5a9ea641cfc4-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.976259 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbd06273-19f7-4051-aad3-5a9ea641cfc4-config-data\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.978568 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/cbd06273-19f7-4051-aad3-5a9ea641cfc4-config-data-custom\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:15 crc kubenswrapper[4703]: I0130 12:21:15.992818 4703 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sksp9\" (UniqueName: \"kubernetes.io/projected/cbd06273-19f7-4051-aad3-5a9ea641cfc4-kube-api-access-sksp9\") pod \"cinder-api-0\" (UID: \"cbd06273-19f7-4051-aad3-5a9ea641cfc4\") " pod="openstack/cinder-api-0" Jan 30 12:21:16 crc kubenswrapper[4703]: I0130 12:21:16.110633 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 30 12:21:16 crc kubenswrapper[4703]: I0130 12:21:16.513950 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-57d7c7f95-fngdp" event={"ID":"f8d5f833-0ceb-4dc6-bb87-ce670386ef8b","Type":"ContainerStarted","Data":"9939db9ea5202539e03722acfab9288ef304c812f01374889ee179bf65673a5a"} Jan 30 12:21:16 crc kubenswrapper[4703]: I0130 12:21:16.547405 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-655b7696db-jp55x" podUID="2c6d5132-9798-418e-92c7-bb0b50b3fd47" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.178:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 30 12:21:16 crc kubenswrapper[4703]: I0130 12:21:16.548150 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-655b7696db-jp55x" podUID="2c6d5132-9798-418e-92c7-bb0b50b3fd47" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.178:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 30 12:21:16 crc kubenswrapper[4703]: I0130 12:21:16.566095 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-57d7c7f95-fngdp" podStartSLOduration=4.864028421 podStartE2EDuration="16.56606857s" podCreationTimestamp="2026-01-30 12:21:00 +0000 UTC" firstStartedPulling="2026-01-30 12:21:02.182732511 +0000 UTC m=+1497.960554165" lastFinishedPulling="2026-01-30 12:21:13.88477266 +0000 UTC m=+1509.662594314" observedRunningTime="2026-01-30 12:21:16.56182909 +0000 UTC m=+1512.339650754" watchObservedRunningTime="2026-01-30 12:21:16.56606857 +0000 UTC m=+1512.343890224" Jan 30 12:21:16 crc kubenswrapper[4703]: I0130 12:21:16.858020 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 30 12:21:17 crc kubenswrapper[4703]: I0130 12:21:17.172102 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26df44bd-bd05-4ed3-b146-fa1111db982e" path="/var/lib/kubelet/pods/26df44bd-bd05-4ed3-b146-fa1111db982e/volumes" Jan 30 12:21:17 crc kubenswrapper[4703]: I0130 12:21:17.173628 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f" path="/var/lib/kubelet/pods/be1448d1-2faa-4d2d-9d5b-f6bfb2f08c3f/volumes" Jan 30 12:21:17 crc kubenswrapper[4703]: I0130 12:21:17.174419 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 30 12:21:17 crc kubenswrapper[4703]: I0130 12:21:17.539474 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"c3ccea95-1c2e-461a-9fc9-6b0171be170f","Type":"ContainerStarted","Data":"002a39559e805060a86bd47867a39eb47bf978c9607df987d9a5ed7bf160a38e"} Jan 30 12:21:17 crc kubenswrapper[4703]: I0130 12:21:17.539747 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rs9pr" podUID="7d3ec3ba-8eec-4f98-8948-6cf24d5638b7" 
containerName="registry-server" containerID="cri-o://70f5c889b4c2bd9dd1520ea57b9c934dadf736c579867781c71e8645c9b8cca7" gracePeriod=2 Jan 30 12:21:17 crc kubenswrapper[4703]: W0130 12:21:17.863571 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcbd06273_19f7_4051_aad3_5a9ea641cfc4.slice/crio-517acee585e9c39a4df4472f025677e800a25cc8fafe29661912be4cdca130c6 WatchSource:0}: Error finding container 517acee585e9c39a4df4472f025677e800a25cc8fafe29661912be4cdca130c6: Status 404 returned error can't find the container with id 517acee585e9c39a4df4472f025677e800a25cc8fafe29661912be4cdca130c6 Jan 30 12:21:17 crc kubenswrapper[4703]: I0130 12:21:17.968813 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 30 12:21:18 crc kubenswrapper[4703]: I0130 12:21:18.573210 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 30 12:21:18 crc kubenswrapper[4703]: I0130 12:21:18.657048 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"cbd06273-19f7-4051-aad3-5a9ea641cfc4","Type":"ContainerStarted","Data":"517acee585e9c39a4df4472f025677e800a25cc8fafe29661912be4cdca130c6"} Jan 30 12:21:18 crc kubenswrapper[4703]: I0130 12:21:18.681355 4703 generic.go:334] "Generic (PLEG): container finished" podID="7d3ec3ba-8eec-4f98-8948-6cf24d5638b7" containerID="70f5c889b4c2bd9dd1520ea57b9c934dadf736c579867781c71e8645c9b8cca7" exitCode=0 Jan 30 12:21:18 crc kubenswrapper[4703]: I0130 12:21:18.682964 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rs9pr" event={"ID":"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7","Type":"ContainerDied","Data":"70f5c889b4c2bd9dd1520ea57b9c934dadf736c579867781c71e8645c9b8cca7"} Jan 30 12:21:18 crc kubenswrapper[4703]: I0130 12:21:18.755235 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 30 12:21:18 crc kubenswrapper[4703]: I0130 12:21:18.776356 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn" Jan 30 12:21:18 crc kubenswrapper[4703]: I0130 12:21:18.888352 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rs9pr" Jan 30 12:21:18 crc kubenswrapper[4703]: I0130 12:21:18.921172 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-xmmwk"] Jan 30 12:21:18 crc kubenswrapper[4703]: I0130 12:21:18.922205 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" podUID="c24494ec-321b-4936-b75b-860fd4bce09b" containerName="dnsmasq-dns" containerID="cri-o://e2ac3ce601974b00bff7b2f070d085019bf50b31648c83b4f12c49345ad136b4" gracePeriod=10 Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:18.999400 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7-utilities\") pod \"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7\" (UID: \"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7\") " Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:18.999452 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vd9rn\" (UniqueName: \"kubernetes.io/projected/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7-kube-api-access-vd9rn\") pod \"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7\" (UID: \"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7\") " Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:18.999684 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7-catalog-content\") pod \"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7\" (UID: \"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7\") " Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:19.000827 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7-utilities" (OuterVolumeSpecName: "utilities") pod "7d3ec3ba-8eec-4f98-8948-6cf24d5638b7" (UID: "7d3ec3ba-8eec-4f98-8948-6cf24d5638b7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:19.026745 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7-kube-api-access-vd9rn" (OuterVolumeSpecName: "kube-api-access-vd9rn") pod "7d3ec3ba-8eec-4f98-8948-6cf24d5638b7" (UID: "7d3ec3ba-8eec-4f98-8948-6cf24d5638b7"). InnerVolumeSpecName "kube-api-access-vd9rn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:19.126876 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:19.126948 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vd9rn\" (UniqueName: \"kubernetes.io/projected/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7-kube-api-access-vd9rn\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:19.288883 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7d3ec3ba-8eec-4f98-8948-6cf24d5638b7" (UID: "7d3ec3ba-8eec-4f98-8948-6cf24d5638b7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:19.335471 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:19.733508 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"c3ccea95-1c2e-461a-9fc9-6b0171be170f","Type":"ContainerStarted","Data":"10872e2658151d3736775d33af2478aa6681e5ac8f132d1c99e4a34d7e0d129f"} Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:19.791572 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=4.791537556 podStartE2EDuration="4.791537556s" podCreationTimestamp="2026-01-30 12:21:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:21:19.756204159 +0000 UTC m=+1515.534025823" watchObservedRunningTime="2026-01-30 12:21:19.791537556 +0000 UTC m=+1515.569359210" Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:19.804826 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rs9pr" event={"ID":"7d3ec3ba-8eec-4f98-8948-6cf24d5638b7","Type":"ContainerDied","Data":"2f9f47d26b6c056d8b2354e5fefd86ff9110ab9e314c17600789a2e9393a6153"} Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:19.804923 4703 scope.go:117] "RemoveContainer" containerID="70f5c889b4c2bd9dd1520ea57b9c934dadf736c579867781c71e8645c9b8cca7" Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:19.805230 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rs9pr" Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:19.816678 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" podUID="c24494ec-321b-4936-b75b-860fd4bce09b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.167:5353: connect: connection refused" Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:19.836421 4703 generic.go:334] "Generic (PLEG): container finished" podID="c24494ec-321b-4936-b75b-860fd4bce09b" containerID="e2ac3ce601974b00bff7b2f070d085019bf50b31648c83b4f12c49345ad136b4" exitCode=0 Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:19.837503 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="c37c42a3-786f-45bf-a2e1-1fa57ebe162a" containerName="cinder-scheduler" containerID="cri-o://a3031845f0de92d153609ad1fbd8a01f948f39897de754e443aa2a8189672435" gracePeriod=30 Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:19.837931 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" event={"ID":"c24494ec-321b-4936-b75b-860fd4bce09b","Type":"ContainerDied","Data":"e2ac3ce601974b00bff7b2f070d085019bf50b31648c83b4f12c49345ad136b4"} Jan 30 12:21:19 crc kubenswrapper[4703]: I0130 12:21:19.838705 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="c37c42a3-786f-45bf-a2e1-1fa57ebe162a" containerName="probe" containerID="cri-o://9d6aec883a6c98ecb2815ec50810b4d1f4f8752ea5e25d2b268fccf7e8d52e89" gracePeriod=30 Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.252221 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rs9pr"] Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.260235 4703 scope.go:117] "RemoveContainer" containerID="e8a16a34faaf526bb60f2149ff055b4011dee35837306d7acf0f3543329b1789" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.288482 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rs9pr"] Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.474968 4703 scope.go:117] "RemoveContainer" containerID="f0bc4d0cf50dadc27cbff3e7da907925bf792d2179e6e754dccf330f69c8291b" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.509549 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5f9958979d-8h859" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.159:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.159:8443: connect: connection refused" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.522402 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-655b7696db-jp55x" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.522860 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.615733 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fv4cj\" (UniqueName: \"kubernetes.io/projected/c24494ec-321b-4936-b75b-860fd4bce09b-kube-api-access-fv4cj\") pod \"c24494ec-321b-4936-b75b-860fd4bce09b\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.619444 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-ovsdbserver-sb\") pod \"c24494ec-321b-4936-b75b-860fd4bce09b\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.619627 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-config\") pod \"c24494ec-321b-4936-b75b-860fd4bce09b\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.619696 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-dns-swift-storage-0\") pod \"c24494ec-321b-4936-b75b-860fd4bce09b\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.619776 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-dns-svc\") pod \"c24494ec-321b-4936-b75b-860fd4bce09b\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.619840 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-ovsdbserver-nb\") pod \"c24494ec-321b-4936-b75b-860fd4bce09b\" (UID: \"c24494ec-321b-4936-b75b-860fd4bce09b\") " Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.624265 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c24494ec-321b-4936-b75b-860fd4bce09b-kube-api-access-fv4cj" (OuterVolumeSpecName: "kube-api-access-fv4cj") pod "c24494ec-321b-4936-b75b-860fd4bce09b" (UID: "c24494ec-321b-4936-b75b-860fd4bce09b"). InnerVolumeSpecName "kube-api-access-fv4cj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.630901 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fv4cj\" (UniqueName: \"kubernetes.io/projected/c24494ec-321b-4936-b75b-860fd4bce09b-kube-api-access-fv4cj\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.740093 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.770269 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c24494ec-321b-4936-b75b-860fd4bce09b" (UID: "c24494ec-321b-4936-b75b-860fd4bce09b"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.788284 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-config" (OuterVolumeSpecName: "config") pod "c24494ec-321b-4936-b75b-860fd4bce09b" (UID: "c24494ec-321b-4936-b75b-860fd4bce09b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.790508 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c24494ec-321b-4936-b75b-860fd4bce09b" (UID: "c24494ec-321b-4936-b75b-860fd4bce09b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.798949 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c24494ec-321b-4936-b75b-860fd4bce09b" (UID: "c24494ec-321b-4936-b75b-860fd4bce09b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.834436 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/572a81eb-68df-470a-9ca8-1febfc6167ad-log-httpd\") pod \"572a81eb-68df-470a-9ca8-1febfc6167ad\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.834591 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-combined-ca-bundle\") pod \"572a81eb-68df-470a-9ca8-1febfc6167ad\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.834799 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/572a81eb-68df-470a-9ca8-1febfc6167ad-run-httpd\") pod \"572a81eb-68df-470a-9ca8-1febfc6167ad\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.834839 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-scripts\") pod \"572a81eb-68df-470a-9ca8-1febfc6167ad\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.834874 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-sg-core-conf-yaml\") pod \"572a81eb-68df-470a-9ca8-1febfc6167ad\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.834891 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-config-data\") pod \"572a81eb-68df-470a-9ca8-1febfc6167ad\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.834938 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"kube-api-access-9gzxc\" (UniqueName: \"kubernetes.io/projected/572a81eb-68df-470a-9ca8-1febfc6167ad-kube-api-access-9gzxc\") pod \"572a81eb-68df-470a-9ca8-1febfc6167ad\" (UID: \"572a81eb-68df-470a-9ca8-1febfc6167ad\") " Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.836242 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/572a81eb-68df-470a-9ca8-1febfc6167ad-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "572a81eb-68df-470a-9ca8-1febfc6167ad" (UID: "572a81eb-68df-470a-9ca8-1febfc6167ad"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.836818 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c24494ec-321b-4936-b75b-860fd4bce09b" (UID: "c24494ec-321b-4936-b75b-860fd4bce09b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.837060 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/572a81eb-68df-470a-9ca8-1febfc6167ad-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "572a81eb-68df-470a-9ca8-1febfc6167ad" (UID: "572a81eb-68df-470a-9ca8-1febfc6167ad"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.842772 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/572a81eb-68df-470a-9ca8-1febfc6167ad-kube-api-access-9gzxc" (OuterVolumeSpecName: "kube-api-access-9gzxc") pod "572a81eb-68df-470a-9ca8-1febfc6167ad" (UID: "572a81eb-68df-470a-9ca8-1febfc6167ad"). InnerVolumeSpecName "kube-api-access-9gzxc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.848854 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.848891 4703 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.848908 4703 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.848923 4703 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/572a81eb-68df-470a-9ca8-1febfc6167ad-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.850008 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.850033 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9gzxc\" (UniqueName: \"kubernetes.io/projected/572a81eb-68df-470a-9ca8-1febfc6167ad-kube-api-access-9gzxc\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.850044 4703 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/572a81eb-68df-470a-9ca8-1febfc6167ad-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.850054 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c24494ec-321b-4936-b75b-860fd4bce09b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.850187 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "572a81eb-68df-470a-9ca8-1febfc6167ad" (UID: "572a81eb-68df-470a-9ca8-1febfc6167ad"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.864464 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-scripts" (OuterVolumeSpecName: "scripts") pod "572a81eb-68df-470a-9ca8-1febfc6167ad" (UID: "572a81eb-68df-470a-9ca8-1febfc6167ad"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.882871 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" event={"ID":"c24494ec-321b-4936-b75b-860fd4bce09b","Type":"ContainerDied","Data":"46d79ed257ab9a1cff8c326e30033fab7480990ef7dd62be1e3243df814ec3a0"} Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.882967 4703 scope.go:117] "RemoveContainer" containerID="e2ac3ce601974b00bff7b2f070d085019bf50b31648c83b4f12c49345ad136b4" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.883201 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55f844cf75-xmmwk" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.884580 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-77fb4cf9b8-pw692" podUID="9c6d3262-7469-45ac-b5c8-9eb0f9456a5a" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.160:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.160:8443: connect: connection refused" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.903256 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"cbd06273-19f7-4051-aad3-5a9ea641cfc4","Type":"ContainerStarted","Data":"e215d27153d11067902e92d424063fd9dd10f3d25b9c1b25d8362348ea10e105"} Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.930835 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.932036 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"572a81eb-68df-470a-9ca8-1febfc6167ad","Type":"ContainerDied","Data":"e45423bb48b132ba7e486e33996b065b164471f574f6dbe34b326e3be4a01e94"} Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.930664 4703 generic.go:334] "Generic (PLEG): container finished" podID="572a81eb-68df-470a-9ca8-1febfc6167ad" containerID="e45423bb48b132ba7e486e33996b065b164471f574f6dbe34b326e3be4a01e94" exitCode=137 Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.932431 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"572a81eb-68df-470a-9ca8-1febfc6167ad","Type":"ContainerDied","Data":"b05f4e7a82c3437a39d5b2966ed03267f9dce9ce85f5df36a02d00f2258df1aa"} Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.956235 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.956585 4703 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.974756 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-xmmwk"] Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.982426 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-config-data" (OuterVolumeSpecName: "config-data") pod "572a81eb-68df-470a-9ca8-1febfc6167ad" (UID: "572a81eb-68df-470a-9ca8-1febfc6167ad"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:20 crc kubenswrapper[4703]: I0130 12:21:20.987185 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55f844cf75-xmmwk"] Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.001756 4703 scope.go:117] "RemoveContainer" containerID="7c113a00f5149b9744478a061a1a656abfd3e4722192c94585220e40203b9b6c" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.062384 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "572a81eb-68df-470a-9ca8-1febfc6167ad" (UID: "572a81eb-68df-470a-9ca8-1febfc6167ad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.067926 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.067963 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/572a81eb-68df-470a-9ca8-1febfc6167ad-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.079804 4703 scope.go:117] "RemoveContainer" containerID="e45423bb48b132ba7e486e33996b065b164471f574f6dbe34b326e3be4a01e94" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.118564 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d3ec3ba-8eec-4f98-8948-6cf24d5638b7" path="/var/lib/kubelet/pods/7d3ec3ba-8eec-4f98-8948-6cf24d5638b7/volumes" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.120092 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c24494ec-321b-4936-b75b-860fd4bce09b" path="/var/lib/kubelet/pods/c24494ec-321b-4936-b75b-860fd4bce09b/volumes" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.121033 4703 scope.go:117] "RemoveContainer" containerID="37a63d0f9e4cd08d66ff7b2a23c08291f9552b813721b66cae5547fe2c7dfabb" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.309756 4703 scope.go:117] "RemoveContainer" containerID="e45423bb48b132ba7e486e33996b065b164471f574f6dbe34b326e3be4a01e94" Jan 30 12:21:21 crc kubenswrapper[4703]: E0130 12:21:21.320477 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e45423bb48b132ba7e486e33996b065b164471f574f6dbe34b326e3be4a01e94\": container with ID starting with e45423bb48b132ba7e486e33996b065b164471f574f6dbe34b326e3be4a01e94 not found: ID does not exist" containerID="e45423bb48b132ba7e486e33996b065b164471f574f6dbe34b326e3be4a01e94" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.320568 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e45423bb48b132ba7e486e33996b065b164471f574f6dbe34b326e3be4a01e94"} err="failed to get container status \"e45423bb48b132ba7e486e33996b065b164471f574f6dbe34b326e3be4a01e94\": rpc error: code = NotFound desc = could not find container \"e45423bb48b132ba7e486e33996b065b164471f574f6dbe34b326e3be4a01e94\": container with ID starting with e45423bb48b132ba7e486e33996b065b164471f574f6dbe34b326e3be4a01e94 not found: ID does not exist" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.320664 4703 scope.go:117] 
"RemoveContainer" containerID="37a63d0f9e4cd08d66ff7b2a23c08291f9552b813721b66cae5547fe2c7dfabb" Jan 30 12:21:21 crc kubenswrapper[4703]: E0130 12:21:21.328995 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37a63d0f9e4cd08d66ff7b2a23c08291f9552b813721b66cae5547fe2c7dfabb\": container with ID starting with 37a63d0f9e4cd08d66ff7b2a23c08291f9552b813721b66cae5547fe2c7dfabb not found: ID does not exist" containerID="37a63d0f9e4cd08d66ff7b2a23c08291f9552b813721b66cae5547fe2c7dfabb" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.329085 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37a63d0f9e4cd08d66ff7b2a23c08291f9552b813721b66cae5547fe2c7dfabb"} err="failed to get container status \"37a63d0f9e4cd08d66ff7b2a23c08291f9552b813721b66cae5547fe2c7dfabb\": rpc error: code = NotFound desc = could not find container \"37a63d0f9e4cd08d66ff7b2a23c08291f9552b813721b66cae5547fe2c7dfabb\": container with ID starting with 37a63d0f9e4cd08d66ff7b2a23c08291f9552b813721b66cae5547fe2c7dfabb not found: ID does not exist" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.515244 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.515827 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.544223 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:21:21 crc kubenswrapper[4703]: E0130 12:21:21.545091 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c24494ec-321b-4936-b75b-860fd4bce09b" containerName="dnsmasq-dns" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.545135 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="c24494ec-321b-4936-b75b-860fd4bce09b" containerName="dnsmasq-dns" Jan 30 12:21:21 crc kubenswrapper[4703]: E0130 12:21:21.545178 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d3ec3ba-8eec-4f98-8948-6cf24d5638b7" containerName="extract-utilities" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.545188 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d3ec3ba-8eec-4f98-8948-6cf24d5638b7" containerName="extract-utilities" Jan 30 12:21:21 crc kubenswrapper[4703]: E0130 12:21:21.545205 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c24494ec-321b-4936-b75b-860fd4bce09b" containerName="init" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.545220 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="c24494ec-321b-4936-b75b-860fd4bce09b" containerName="init" Jan 30 12:21:21 crc kubenswrapper[4703]: E0130 12:21:21.545241 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d3ec3ba-8eec-4f98-8948-6cf24d5638b7" containerName="extract-content" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.545249 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d3ec3ba-8eec-4f98-8948-6cf24d5638b7" containerName="extract-content" Jan 30 12:21:21 crc kubenswrapper[4703]: E0130 12:21:21.545259 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="572a81eb-68df-470a-9ca8-1febfc6167ad" containerName="proxy-httpd" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.545266 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="572a81eb-68df-470a-9ca8-1febfc6167ad" containerName="proxy-httpd" Jan 30 
12:21:21 crc kubenswrapper[4703]: E0130 12:21:21.545290 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d3ec3ba-8eec-4f98-8948-6cf24d5638b7" containerName="registry-server" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.545297 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d3ec3ba-8eec-4f98-8948-6cf24d5638b7" containerName="registry-server" Jan 30 12:21:21 crc kubenswrapper[4703]: E0130 12:21:21.545311 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="572a81eb-68df-470a-9ca8-1febfc6167ad" containerName="ceilometer-notification-agent" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.545319 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="572a81eb-68df-470a-9ca8-1febfc6167ad" containerName="ceilometer-notification-agent" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.545578 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="572a81eb-68df-470a-9ca8-1febfc6167ad" containerName="ceilometer-notification-agent" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.545601 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="572a81eb-68df-470a-9ca8-1febfc6167ad" containerName="proxy-httpd" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.545636 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="c24494ec-321b-4936-b75b-860fd4bce09b" containerName="dnsmasq-dns" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.545649 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d3ec3ba-8eec-4f98-8948-6cf24d5638b7" containerName="registry-server" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.547774 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.553157 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.553454 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.559412 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fe91c0fe-118b-45a0-8e84-cb3691406a11-run-httpd\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.559704 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fe91c0fe-118b-45a0-8e84-cb3691406a11-log-httpd\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.559921 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.565567 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnd49\" (UniqueName: \"kubernetes.io/projected/fe91c0fe-118b-45a0-8e84-cb3691406a11-kube-api-access-pnd49\") pod \"ceilometer-0\" 
(UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.565783 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-scripts\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.566289 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-config-data\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.566515 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.578078 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.606662 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-655b7696db-jp55x" podUID="2c6d5132-9798-418e-92c7-bb0b50b3fd47" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.178:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.615323 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-655b7696db-jp55x" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.690967 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.691081 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fe91c0fe-118b-45a0-8e84-cb3691406a11-run-httpd\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.691163 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fe91c0fe-118b-45a0-8e84-cb3691406a11-log-httpd\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.691317 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.691361 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnd49\" (UniqueName: 
\"kubernetes.io/projected/fe91c0fe-118b-45a0-8e84-cb3691406a11-kube-api-access-pnd49\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.691411 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-scripts\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.691618 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-config-data\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.693628 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fe91c0fe-118b-45a0-8e84-cb3691406a11-log-httpd\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.702981 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fe91c0fe-118b-45a0-8e84-cb3691406a11-run-httpd\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.706681 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-config-data\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.711308 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.714080 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.738850 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-scripts\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.744064 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnd49\" (UniqueName: \"kubernetes.io/projected/fe91c0fe-118b-45a0-8e84-cb3691406a11-kube-api-access-pnd49\") pod \"ceilometer-0\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.903941 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.975492 4703 generic.go:334] "Generic (PLEG): container finished" podID="c37c42a3-786f-45bf-a2e1-1fa57ebe162a" containerID="9d6aec883a6c98ecb2815ec50810b4d1f4f8752ea5e25d2b268fccf7e8d52e89" exitCode=0 Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.975599 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c37c42a3-786f-45bf-a2e1-1fa57ebe162a","Type":"ContainerDied","Data":"9d6aec883a6c98ecb2815ec50810b4d1f4f8752ea5e25d2b268fccf7e8d52e89"} Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.984349 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"cbd06273-19f7-4051-aad3-5a9ea641cfc4","Type":"ContainerStarted","Data":"a1554581087b68de3d70b00dfac1f4ab0dc294169e3d3cbec8d3a046eac52c7e"} Jan 30 12:21:21 crc kubenswrapper[4703]: I0130 12:21:21.985330 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 30 12:21:22 crc kubenswrapper[4703]: I0130 12:21:22.036815 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=7.036783881 podStartE2EDuration="7.036783881s" podCreationTimestamp="2026-01-30 12:21:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:21:22.014331657 +0000 UTC m=+1517.792153341" watchObservedRunningTime="2026-01-30 12:21:22.036783881 +0000 UTC m=+1517.814605545" Jan 30 12:21:22 crc kubenswrapper[4703]: I0130 12:21:22.579454 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:21:22 crc kubenswrapper[4703]: I0130 12:21:22.771144 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-78d958758-q982d" Jan 30 12:21:22 crc kubenswrapper[4703]: I0130 12:21:22.806219 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:21:23 crc kubenswrapper[4703]: I0130 12:21:23.157194 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="572a81eb-68df-470a-9ca8-1febfc6167ad" path="/var/lib/kubelet/pods/572a81eb-68df-470a-9ca8-1febfc6167ad/volumes" Jan 30 12:21:23 crc kubenswrapper[4703]: I0130 12:21:23.159009 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fe91c0fe-118b-45a0-8e84-cb3691406a11","Type":"ContainerStarted","Data":"7eb0bbbf215e94763a281f5d742c7ec0492ed6b58754391933e70a37f14f0257"} Jan 30 12:21:23 crc kubenswrapper[4703]: I0130 12:21:23.840228 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-79994f947f-qcpzj"] Jan 30 12:21:23 crc kubenswrapper[4703]: I0130 12:21:23.843005 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:23 crc kubenswrapper[4703]: I0130 12:21:23.848218 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 30 12:21:23 crc kubenswrapper[4703]: I0130 12:21:23.848463 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Jan 30 12:21:23 crc kubenswrapper[4703]: I0130 12:21:23.848462 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Jan 30 12:21:23 crc kubenswrapper[4703]: I0130 12:21:23.875644 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-79994f947f-qcpzj"] Jan 30 12:21:23 crc kubenswrapper[4703]: I0130 12:21:23.965272 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-run-httpd\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:23 crc kubenswrapper[4703]: I0130 12:21:23.965345 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqxwc\" (UniqueName: \"kubernetes.io/projected/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-kube-api-access-rqxwc\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:23 crc kubenswrapper[4703]: I0130 12:21:23.965448 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-config-data\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:23 crc kubenswrapper[4703]: I0130 12:21:23.965557 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-combined-ca-bundle\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:23 crc kubenswrapper[4703]: I0130 12:21:23.965614 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-internal-tls-certs\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:23 crc kubenswrapper[4703]: I0130 12:21:23.965717 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-etc-swift\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:23 crc kubenswrapper[4703]: I0130 12:21:23.965760 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-log-httpd\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " 
pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:23 crc kubenswrapper[4703]: I0130 12:21:23.965816 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-public-tls-certs\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.067988 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-log-httpd\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.068170 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-public-tls-certs\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.068205 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-run-httpd\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.068223 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqxwc\" (UniqueName: \"kubernetes.io/projected/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-kube-api-access-rqxwc\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.068291 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-config-data\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.068401 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-combined-ca-bundle\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.068454 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-internal-tls-certs\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.068566 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-etc-swift\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" 
Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.070810 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-run-httpd\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.071504 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-log-httpd\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.083737 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-config-data\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.084687 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-internal-tls-certs\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.089869 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-etc-swift\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.090882 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-combined-ca-bundle\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.107062 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-public-tls-certs\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.110116 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqxwc\" (UniqueName: \"kubernetes.io/projected/f2a53a72-a42a-42aa-b018-50ca9a7fa9aa-kube-api-access-rqxwc\") pod \"swift-proxy-79994f947f-qcpzj\" (UID: \"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa\") " pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.205942 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fe91c0fe-118b-45a0-8e84-cb3691406a11","Type":"ContainerStarted","Data":"3e0cf78c4445fcdf950205a07640f86e5cdf1d3467b091e47df112bea318af16"} Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.227939 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.252923 4703 generic.go:334] "Generic (PLEG): container finished" podID="c37c42a3-786f-45bf-a2e1-1fa57ebe162a" containerID="a3031845f0de92d153609ad1fbd8a01f948f39897de754e443aa2a8189672435" exitCode=0 Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.252986 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c37c42a3-786f-45bf-a2e1-1fa57ebe162a","Type":"ContainerDied","Data":"a3031845f0de92d153609ad1fbd8a01f948f39897de754e443aa2a8189672435"} Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.296320 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.395289 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-etc-machine-id\") pod \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.395372 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-config-data\") pod \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.395404 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-combined-ca-bundle\") pod \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.395598 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-scripts\") pod \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.395719 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2brxx\" (UniqueName: \"kubernetes.io/projected/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-kube-api-access-2brxx\") pod \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.395761 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-config-data-custom\") pod \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\" (UID: \"c37c42a3-786f-45bf-a2e1-1fa57ebe162a\") " Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.402347 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "c37c42a3-786f-45bf-a2e1-1fa57ebe162a" (UID: "c37c42a3-786f-45bf-a2e1-1fa57ebe162a"). InnerVolumeSpecName "etc-machine-id". 
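
The "Generic (PLEG): container finished" and "SyncLoop (PLEG): event for pod" entries are produced by the pod lifecycle event generator, which periodically relists container state from the runtime and turns differences between consecutive snapshots into ContainerStarted/ContainerDied events for the sync loop. A rough sketch of that diff, assuming a simplified snapshot of containerID -> running (not the kubelet's real structs):

package main

import "fmt"

// Event is a stand-in for the PLEG events in the log; the field names are
// illustrative, not the kubelet's actual types.
type Event struct {
	ContainerID string
	Type        string // "ContainerStarted" or "ContainerDied"
}

// relistDiff compares two relist snapshots of containerID -> running and
// emits the lifecycle events implied by the change between them.
func relistDiff(prev, curr map[string]bool) []Event {
	var events []Event
	for id, running := range curr {
		if running && !prev[id] {
			events = append(events, Event{ContainerID: id, Type: "ContainerStarted"})
		}
	}
	for id, running := range prev {
		if running && !curr[id] {
			events = append(events, Event{ContainerID: id, Type: "ContainerDied"})
		}
	}
	return events
}

func main() {
	// Truncated container IDs from the entries above, for flavor only.
	prev := map[string]bool{"a3031845f0de": true} // cinder-scheduler container that finished
	curr := map[string]bool{"3e0cf78c4445": true} // ceilometer container that started
	fmt.Println(relistDiff(prev, curr))
}
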
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.413747 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-kube-api-access-2brxx" (OuterVolumeSpecName: "kube-api-access-2brxx") pod "c37c42a3-786f-45bf-a2e1-1fa57ebe162a" (UID: "c37c42a3-786f-45bf-a2e1-1fa57ebe162a"). InnerVolumeSpecName "kube-api-access-2brxx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.417404 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c37c42a3-786f-45bf-a2e1-1fa57ebe162a" (UID: "c37c42a3-786f-45bf-a2e1-1fa57ebe162a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.425685 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-scripts" (OuterVolumeSpecName: "scripts") pod "c37c42a3-786f-45bf-a2e1-1fa57ebe162a" (UID: "c37c42a3-786f-45bf-a2e1-1fa57ebe162a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.500761 4703 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.500795 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.500807 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2brxx\" (UniqueName: \"kubernetes.io/projected/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-kube-api-access-2brxx\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.500850 4703 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.519462 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c37c42a3-786f-45bf-a2e1-1fa57ebe162a" (UID: "c37c42a3-786f-45bf-a2e1-1fa57ebe162a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.619982 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.634251 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-config-data" (OuterVolumeSpecName: "config-data") pod "c37c42a3-786f-45bf-a2e1-1fa57ebe162a" (UID: "c37c42a3-786f-45bf-a2e1-1fa57ebe162a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:24 crc kubenswrapper[4703]: I0130 12:21:24.722767 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c37c42a3-786f-45bf-a2e1-1fa57ebe162a-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.257303 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-79994f947f-qcpzj"] Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.275381 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fe91c0fe-118b-45a0-8e84-cb3691406a11","Type":"ContainerStarted","Data":"f53922975bea1b467c263a9b216c9f77e0f57a7caca10a1d2e7139e57386799d"} Jan 30 12:21:25 crc kubenswrapper[4703]: W0130 12:21:25.279660 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2a53a72_a42a_42aa_b018_50ca9a7fa9aa.slice/crio-24a3a93626d877179837002c762acd16fae44c6e8ae37493db7151e212dfd277 WatchSource:0}: Error finding container 24a3a93626d877179837002c762acd16fae44c6e8ae37493db7151e212dfd277: Status 404 returned error can't find the container with id 24a3a93626d877179837002c762acd16fae44c6e8ae37493db7151e212dfd277 Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.288694 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"c37c42a3-786f-45bf-a2e1-1fa57ebe162a","Type":"ContainerDied","Data":"6905fd089a0ff764cf1265bf46d7da7264bc8b69942506c37c6058093e12d47d"} Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.288792 4703 scope.go:117] "RemoveContainer" containerID="9d6aec883a6c98ecb2815ec50810b4d1f4f8752ea5e25d2b268fccf7e8d52e89" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.288918 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.338407 4703 scope.go:117] "RemoveContainer" containerID="a3031845f0de92d153609ad1fbd8a01f948f39897de754e443aa2a8189672435" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.355574 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.407890 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.436391 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 30 12:21:25 crc kubenswrapper[4703]: E0130 12:21:25.437109 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c37c42a3-786f-45bf-a2e1-1fa57ebe162a" containerName="cinder-scheduler" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.437158 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="c37c42a3-786f-45bf-a2e1-1fa57ebe162a" containerName="cinder-scheduler" Jan 30 12:21:25 crc kubenswrapper[4703]: E0130 12:21:25.437181 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c37c42a3-786f-45bf-a2e1-1fa57ebe162a" containerName="probe" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.437188 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="c37c42a3-786f-45bf-a2e1-1fa57ebe162a" containerName="probe" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.437482 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="c37c42a3-786f-45bf-a2e1-1fa57ebe162a" containerName="cinder-scheduler" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.437511 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="c37c42a3-786f-45bf-a2e1-1fa57ebe162a" containerName="probe" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.438902 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.445652 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.458207 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.580766 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b8e1d2e-960f-47aa-b6d0-f327ecdd5880-scripts\") pod \"cinder-scheduler-0\" (UID: \"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880\") " pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.580890 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kjfk\" (UniqueName: \"kubernetes.io/projected/3b8e1d2e-960f-47aa-b6d0-f327ecdd5880-kube-api-access-4kjfk\") pod \"cinder-scheduler-0\" (UID: \"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880\") " pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.580991 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3b8e1d2e-960f-47aa-b6d0-f327ecdd5880-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880\") " pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.581074 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3b8e1d2e-960f-47aa-b6d0-f327ecdd5880-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880\") " pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.581288 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b8e1d2e-960f-47aa-b6d0-f327ecdd5880-config-data\") pod \"cinder-scheduler-0\" (UID: \"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880\") " pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.581396 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b8e1d2e-960f-47aa-b6d0-f327ecdd5880-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880\") " pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.682750 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b8e1d2e-960f-47aa-b6d0-f327ecdd5880-scripts\") pod \"cinder-scheduler-0\" (UID: \"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880\") " pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.683348 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kjfk\" (UniqueName: \"kubernetes.io/projected/3b8e1d2e-960f-47aa-b6d0-f327ecdd5880-kube-api-access-4kjfk\") pod \"cinder-scheduler-0\" (UID: \"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880\") " pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.683418 4703 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3b8e1d2e-960f-47aa-b6d0-f327ecdd5880-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880\") " pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.683452 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3b8e1d2e-960f-47aa-b6d0-f327ecdd5880-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880\") " pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.683519 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b8e1d2e-960f-47aa-b6d0-f327ecdd5880-config-data\") pod \"cinder-scheduler-0\" (UID: \"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880\") " pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.683564 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b8e1d2e-960f-47aa-b6d0-f327ecdd5880-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880\") " pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.684015 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3b8e1d2e-960f-47aa-b6d0-f327ecdd5880-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880\") " pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.708513 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3b8e1d2e-960f-47aa-b6d0-f327ecdd5880-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880\") " pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.708859 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kjfk\" (UniqueName: \"kubernetes.io/projected/3b8e1d2e-960f-47aa-b6d0-f327ecdd5880-kube-api-access-4kjfk\") pod \"cinder-scheduler-0\" (UID: \"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880\") " pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.708929 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3b8e1d2e-960f-47aa-b6d0-f327ecdd5880-scripts\") pod \"cinder-scheduler-0\" (UID: \"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880\") " pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.709226 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3b8e1d2e-960f-47aa-b6d0-f327ecdd5880-config-data\") pod \"cinder-scheduler-0\" (UID: \"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880\") " pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.712761 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3b8e1d2e-960f-47aa-b6d0-f327ecdd5880-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880\") " pod="openstack/cinder-scheduler-0" Jan 30 
12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.861883 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 30 12:21:25 crc kubenswrapper[4703]: I0130 12:21:25.956032 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0" Jan 30 12:21:26 crc kubenswrapper[4703]: I0130 12:21:26.029901 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0" Jan 30 12:21:26 crc kubenswrapper[4703]: I0130 12:21:26.337098 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fe91c0fe-118b-45a0-8e84-cb3691406a11","Type":"ContainerStarted","Data":"068ba57f0ae3f793336fd962e58b2d5696abbf4db0fd345a8329fbf826843e6e"} Jan 30 12:21:26 crc kubenswrapper[4703]: I0130 12:21:26.345204 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-79994f947f-qcpzj" event={"ID":"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa","Type":"ContainerStarted","Data":"725f9e64e75a996ee30bfc175a94799e304d3c543e9c2129a89af7882f82ba8c"} Jan 30 12:21:26 crc kubenswrapper[4703]: I0130 12:21:26.345280 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-79994f947f-qcpzj" event={"ID":"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa","Type":"ContainerStarted","Data":"a1b46c5b785a7f6d8cfead467c717439f251b110ba13c694b41c181cadcd112b"} Jan 30 12:21:26 crc kubenswrapper[4703]: I0130 12:21:26.345298 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-79994f947f-qcpzj" event={"ID":"f2a53a72-a42a-42aa-b018-50ca9a7fa9aa","Type":"ContainerStarted","Data":"24a3a93626d877179837002c762acd16fae44c6e8ae37493db7151e212dfd277"} Jan 30 12:21:26 crc kubenswrapper[4703]: I0130 12:21:26.345641 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:26 crc kubenswrapper[4703]: I0130 12:21:26.345680 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:26 crc kubenswrapper[4703]: I0130 12:21:26.353833 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0" Jan 30 12:21:26 crc kubenswrapper[4703]: I0130 12:21:26.392083 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-79994f947f-qcpzj" podStartSLOduration=3.392059686 podStartE2EDuration="3.392059686s" podCreationTimestamp="2026-01-30 12:21:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:21:26.388038521 +0000 UTC m=+1522.165860175" watchObservedRunningTime="2026-01-30 12:21:26.392059686 +0000 UTC m=+1522.169881340" Jan 30 12:21:26 crc kubenswrapper[4703]: I0130 12:21:26.433384 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0" Jan 30 12:21:26 crc kubenswrapper[4703]: I0130 12:21:26.724037 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 30 12:21:27 crc kubenswrapper[4703]: I0130 12:21:27.099822 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c37c42a3-786f-45bf-a2e1-1fa57ebe162a" path="/var/lib/kubelet/pods/c37c42a3-786f-45bf-a2e1-1fa57ebe162a/volumes" Jan 30 12:21:27 crc kubenswrapper[4703]: I0130 12:21:27.149619 4703 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack/barbican-api-78d958758-q982d" Jan 30 12:21:27 crc kubenswrapper[4703]: I0130 12:21:27.270463 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-655b7696db-jp55x"] Jan 30 12:21:27 crc kubenswrapper[4703]: I0130 12:21:27.271307 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-655b7696db-jp55x" podUID="2c6d5132-9798-418e-92c7-bb0b50b3fd47" containerName="barbican-api-log" containerID="cri-o://653cc576beb0aa27ee6d08fbeb0e44bca965d94c921c6662d2cce93ef06cfefe" gracePeriod=30 Jan 30 12:21:27 crc kubenswrapper[4703]: I0130 12:21:27.271471 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-655b7696db-jp55x" podUID="2c6d5132-9798-418e-92c7-bb0b50b3fd47" containerName="barbican-api" containerID="cri-o://655e99fe1eacd608561bffbe785ec2376d1022f007050665de9607175d526fce" gracePeriod=30 Jan 30 12:21:27 crc kubenswrapper[4703]: I0130 12:21:27.414311 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880","Type":"ContainerStarted","Data":"6677c81fb07d4d9aea054efafde35fc554a6e88c1bf863678c3faf3b0e2bead1"} Jan 30 12:21:28 crc kubenswrapper[4703]: I0130 12:21:28.112782 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7ccfb848f-k68gw" Jan 30 12:21:28 crc kubenswrapper[4703]: I0130 12:21:28.251494 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-676db989fc-4rthl"] Jan 30 12:21:28 crc kubenswrapper[4703]: I0130 12:21:28.257204 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-676db989fc-4rthl" podUID="62d4e769-8427-473f-8184-be89133bb4bc" containerName="neutron-api" containerID="cri-o://4c41d93f84f1c54e2ae7edb332eb5ae9e18ea295b155d77b1e1f1f76559bfaf4" gracePeriod=30 Jan 30 12:21:28 crc kubenswrapper[4703]: I0130 12:21:28.257731 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-676db989fc-4rthl" podUID="62d4e769-8427-473f-8184-be89133bb4bc" containerName="neutron-httpd" containerID="cri-o://a598bc6d257190579fae80d1f2fe82b84ac0ae7ac2e04a7cb34954a853b0fd27" gracePeriod=30 Jan 30 12:21:28 crc kubenswrapper[4703]: I0130 12:21:28.512441 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880","Type":"ContainerStarted","Data":"af38286e7492c97c1652f5d2a36ea79ce9857f098898441b580810617f15169c"} Jan 30 12:21:28 crc kubenswrapper[4703]: I0130 12:21:28.528954 4703 generic.go:334] "Generic (PLEG): container finished" podID="2c6d5132-9798-418e-92c7-bb0b50b3fd47" containerID="653cc576beb0aa27ee6d08fbeb0e44bca965d94c921c6662d2cce93ef06cfefe" exitCode=143 Jan 30 12:21:28 crc kubenswrapper[4703]: I0130 12:21:28.529944 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-655b7696db-jp55x" event={"ID":"2c6d5132-9798-418e-92c7-bb0b50b3fd47","Type":"ContainerDied","Data":"653cc576beb0aa27ee6d08fbeb0e44bca965d94c921c6662d2cce93ef06cfefe"} Jan 30 12:21:29 crc kubenswrapper[4703]: I0130 12:21:29.551786 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fe91c0fe-118b-45a0-8e84-cb3691406a11","Type":"ContainerStarted","Data":"89ee06fee150532b9c98b0751dc0ad5f71c7b806b29a356ab56e67a4ef77ff14"} Jan 30 12:21:29 crc kubenswrapper[4703]: I0130 
12:21:29.552240 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerName="ceilometer-central-agent" containerID="cri-o://3e0cf78c4445fcdf950205a07640f86e5cdf1d3467b091e47df112bea318af16" gracePeriod=30 Jan 30 12:21:29 crc kubenswrapper[4703]: I0130 12:21:29.553228 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerName="proxy-httpd" containerID="cri-o://89ee06fee150532b9c98b0751dc0ad5f71c7b806b29a356ab56e67a4ef77ff14" gracePeriod=30 Jan 30 12:21:29 crc kubenswrapper[4703]: I0130 12:21:29.553293 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerName="ceilometer-notification-agent" containerID="cri-o://f53922975bea1b467c263a9b216c9f77e0f57a7caca10a1d2e7139e57386799d" gracePeriod=30 Jan 30 12:21:29 crc kubenswrapper[4703]: I0130 12:21:29.553370 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 30 12:21:29 crc kubenswrapper[4703]: I0130 12:21:29.553403 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerName="sg-core" containerID="cri-o://068ba57f0ae3f793336fd962e58b2d5696abbf4db0fd345a8329fbf826843e6e" gracePeriod=30 Jan 30 12:21:29 crc kubenswrapper[4703]: I0130 12:21:29.582553 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"3b8e1d2e-960f-47aa-b6d0-f327ecdd5880","Type":"ContainerStarted","Data":"c68526d288cd0b31315adf59fc5db53bdfb2f21fb1bfc804866c51c6b566be65"} Jan 30 12:21:29 crc kubenswrapper[4703]: I0130 12:21:29.593801 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.299933203 podStartE2EDuration="8.593771285s" podCreationTimestamp="2026-01-30 12:21:21 +0000 UTC" firstStartedPulling="2026-01-30 12:21:22.585226288 +0000 UTC m=+1518.363047942" lastFinishedPulling="2026-01-30 12:21:28.87906437 +0000 UTC m=+1524.656886024" observedRunningTime="2026-01-30 12:21:29.576991279 +0000 UTC m=+1525.354812933" watchObservedRunningTime="2026-01-30 12:21:29.593771285 +0000 UTC m=+1525.371592939" Jan 30 12:21:29 crc kubenswrapper[4703]: I0130 12:21:29.614812 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.61477676 podStartE2EDuration="4.61477676s" podCreationTimestamp="2026-01-30 12:21:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:21:29.611527676 +0000 UTC m=+1525.389349330" watchObservedRunningTime="2026-01-30 12:21:29.61477676 +0000 UTC m=+1525.392598414" Jan 30 12:21:29 crc kubenswrapper[4703]: I0130 12:21:29.627705 4703 generic.go:334] "Generic (PLEG): container finished" podID="62d4e769-8427-473f-8184-be89133bb4bc" containerID="a598bc6d257190579fae80d1f2fe82b84ac0ae7ac2e04a7cb34954a853b0fd27" exitCode=0 Jan 30 12:21:29 crc kubenswrapper[4703]: I0130 12:21:29.627784 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-676db989fc-4rthl" event={"ID":"62d4e769-8427-473f-8184-be89133bb4bc","Type":"ContainerDied","Data":"a598bc6d257190579fae80d1f2fe82b84ac0ae7ac2e04a7cb34954a853b0fd27"} Jan 30 
12:21:30 crc kubenswrapper[4703]: I0130 12:21:30.505332 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5f9958979d-8h859" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.159:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.159:8443: connect: connection refused" Jan 30 12:21:30 crc kubenswrapper[4703]: I0130 12:21:30.505516 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:21:30 crc kubenswrapper[4703]: I0130 12:21:30.507292 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"d757dd822a89b950ce6e9d4dc97199e2572ee94ccf2beca2ecace35453f877f7"} pod="openstack/horizon-5f9958979d-8h859" containerMessage="Container horizon failed startup probe, will be restarted" Jan 30 12:21:30 crc kubenswrapper[4703]: I0130 12:21:30.507364 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5f9958979d-8h859" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" containerID="cri-o://d757dd822a89b950ce6e9d4dc97199e2572ee94ccf2beca2ecace35453f877f7" gracePeriod=30 Jan 30 12:21:30 crc kubenswrapper[4703]: I0130 12:21:30.660082 4703 generic.go:334] "Generic (PLEG): container finished" podID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerID="89ee06fee150532b9c98b0751dc0ad5f71c7b806b29a356ab56e67a4ef77ff14" exitCode=0 Jan 30 12:21:30 crc kubenswrapper[4703]: I0130 12:21:30.660493 4703 generic.go:334] "Generic (PLEG): container finished" podID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerID="068ba57f0ae3f793336fd962e58b2d5696abbf4db0fd345a8329fbf826843e6e" exitCode=2 Jan 30 12:21:30 crc kubenswrapper[4703]: I0130 12:21:30.660504 4703 generic.go:334] "Generic (PLEG): container finished" podID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerID="f53922975bea1b467c263a9b216c9f77e0f57a7caca10a1d2e7139e57386799d" exitCode=0 Jan 30 12:21:30 crc kubenswrapper[4703]: I0130 12:21:30.661839 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fe91c0fe-118b-45a0-8e84-cb3691406a11","Type":"ContainerDied","Data":"89ee06fee150532b9c98b0751dc0ad5f71c7b806b29a356ab56e67a4ef77ff14"} Jan 30 12:21:30 crc kubenswrapper[4703]: I0130 12:21:30.661882 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fe91c0fe-118b-45a0-8e84-cb3691406a11","Type":"ContainerDied","Data":"068ba57f0ae3f793336fd962e58b2d5696abbf4db0fd345a8329fbf826843e6e"} Jan 30 12:21:30 crc kubenswrapper[4703]: I0130 12:21:30.661897 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fe91c0fe-118b-45a0-8e84-cb3691406a11","Type":"ContainerDied","Data":"f53922975bea1b467c263a9b216c9f77e0f57a7caca10a1d2e7139e57386799d"} Jan 30 12:21:30 crc kubenswrapper[4703]: I0130 12:21:30.863189 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 30 12:21:30 crc kubenswrapper[4703]: I0130 12:21:30.879173 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-77fb4cf9b8-pw692" podUID="9c6d3262-7469-45ac-b5c8-9eb0f9456a5a" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.160:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.160:8443: connect: connection refused" Jan 30 12:21:30 crc kubenswrapper[4703]: 
I0130 12:21:30.879307 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:21:30 crc kubenswrapper[4703]: I0130 12:21:30.880584 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"fda2400a9ade21756ca56d52218ebcddb5692194c691d158ef06b11e6b0694d2"} pod="openstack/horizon-77fb4cf9b8-pw692" containerMessage="Container horizon failed startup probe, will be restarted" Jan 30 12:21:30 crc kubenswrapper[4703]: I0130 12:21:30.880641 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-77fb4cf9b8-pw692" podUID="9c6d3262-7469-45ac-b5c8-9eb0f9456a5a" containerName="horizon" containerID="cri-o://fda2400a9ade21756ca56d52218ebcddb5692194c691d158ef06b11e6b0694d2" gracePeriod=30 Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.120638 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="cbd06273-19f7-4051-aad3-5a9ea641cfc4" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.185:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.501179 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-655b7696db-jp55x" Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.653320 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c6d5132-9798-418e-92c7-bb0b50b3fd47-logs\") pod \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.653503 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c6d5132-9798-418e-92c7-bb0b50b3fd47-combined-ca-bundle\") pod \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.653544 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2c6d5132-9798-418e-92c7-bb0b50b3fd47-config-data-custom\") pod \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.653677 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c6d5132-9798-418e-92c7-bb0b50b3fd47-config-data\") pod \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.653748 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z64wn\" (UniqueName: \"kubernetes.io/projected/2c6d5132-9798-418e-92c7-bb0b50b3fd47-kube-api-access-z64wn\") pod \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\" (UID: \"2c6d5132-9798-418e-92c7-bb0b50b3fd47\") " Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.655602 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c6d5132-9798-418e-92c7-bb0b50b3fd47-logs" (OuterVolumeSpecName: "logs") pod "2c6d5132-9798-418e-92c7-bb0b50b3fd47" (UID: "2c6d5132-9798-418e-92c7-bb0b50b3fd47"). InnerVolumeSpecName "logs". 
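
The horizon startup-probe failures above ("connect: connection refused") and the cinder-api readiness timeout are plain HTTP GET probes executed by the kubelet; for HTTPS endpoints the kubelet does not verify the serving certificate. A self-contained sketch of a single probe attempt (the URL comes from the log output, but the helper itself is illustrative, not the kubelet's prober package):

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// probeOnce runs one HTTP GET probe: any transport error or a status outside
// 200-399 counts as a failed attempt, matching how the failures above read.
func probeOnce(url string, timeout time.Duration) error {
	client := &http.Client{
		Timeout: timeout,
		Transport: &http.Transport{
			// Kubelet HTTPS probes skip certificate verification.
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "dial tcp 10.217.0.159:8443: connect: connection refused"
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("probe failed: status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	// Endpoint taken from the horizon probe failure above.
	fmt.Println(probeOnce("https://10.217.0.159:8443/dashboard/auth/login/?next=/dashboard/", 5*time.Second))
}

Once a startup probe exceeds its failure threshold, the container is killed and restarted, which is exactly the "Container horizon failed startup probe, will be restarted" sequence logged for both horizon pods.
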
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.664325 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c6d5132-9798-418e-92c7-bb0b50b3fd47-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2c6d5132-9798-418e-92c7-bb0b50b3fd47" (UID: "2c6d5132-9798-418e-92c7-bb0b50b3fd47"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.736183 4703 generic.go:334] "Generic (PLEG): container finished" podID="2c6d5132-9798-418e-92c7-bb0b50b3fd47" containerID="655e99fe1eacd608561bffbe785ec2376d1022f007050665de9607175d526fce" exitCode=0 Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.736347 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-655b7696db-jp55x" Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.736385 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-655b7696db-jp55x" event={"ID":"2c6d5132-9798-418e-92c7-bb0b50b3fd47","Type":"ContainerDied","Data":"655e99fe1eacd608561bffbe785ec2376d1022f007050665de9607175d526fce"} Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.736699 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-655b7696db-jp55x" event={"ID":"2c6d5132-9798-418e-92c7-bb0b50b3fd47","Type":"ContainerDied","Data":"f2c4ddfb02f2a74fbd7114cddbce48dcf39c210a7a99466f076980e2882d73b5"} Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.736728 4703 scope.go:117] "RemoveContainer" containerID="655e99fe1eacd608561bffbe785ec2376d1022f007050665de9607175d526fce" Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.740418 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c6d5132-9798-418e-92c7-bb0b50b3fd47-kube-api-access-z64wn" (OuterVolumeSpecName: "kube-api-access-z64wn") pod "2c6d5132-9798-418e-92c7-bb0b50b3fd47" (UID: "2c6d5132-9798-418e-92c7-bb0b50b3fd47"). InnerVolumeSpecName "kube-api-access-z64wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.745419 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c6d5132-9798-418e-92c7-bb0b50b3fd47-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2c6d5132-9798-418e-92c7-bb0b50b3fd47" (UID: "2c6d5132-9798-418e-92c7-bb0b50b3fd47"). InnerVolumeSpecName "combined-ca-bundle". 
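
The exit codes in the barbican teardown follow the usual Unix convention: barbican-api finishes with exitCode=0 just above because it shuts down cleanly on SIGTERM, while barbican-api-log was reported earlier (12:21:28) with exitCode=143, which by convention is 128 plus SIGTERM's signal number, i.e. the process died on the signal itself. A one-liner to confirm the arithmetic:

package main

import (
	"fmt"
	"syscall"
)

// By convention a process killed by a signal is reported as 128+signo,
// so the exitCode=143 above is 128 + SIGTERM (15) from the graceful kill.
func main() {
	fmt.Println(128 + int(syscall.SIGTERM)) // prints 143
}
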
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.757163 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z64wn\" (UniqueName: \"kubernetes.io/projected/2c6d5132-9798-418e-92c7-bb0b50b3fd47-kube-api-access-z64wn\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.757213 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c6d5132-9798-418e-92c7-bb0b50b3fd47-logs\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.757224 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c6d5132-9798-418e-92c7-bb0b50b3fd47-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.757233 4703 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2c6d5132-9798-418e-92c7-bb0b50b3fd47-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.825316 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c6d5132-9798-418e-92c7-bb0b50b3fd47-config-data" (OuterVolumeSpecName: "config-data") pod "2c6d5132-9798-418e-92c7-bb0b50b3fd47" (UID: "2c6d5132-9798-418e-92c7-bb0b50b3fd47"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.859314 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c6d5132-9798-418e-92c7-bb0b50b3fd47-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.935468 4703 scope.go:117] "RemoveContainer" containerID="653cc576beb0aa27ee6d08fbeb0e44bca965d94c921c6662d2cce93ef06cfefe" Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.974855 4703 scope.go:117] "RemoveContainer" containerID="655e99fe1eacd608561bffbe785ec2376d1022f007050665de9607175d526fce" Jan 30 12:21:31 crc kubenswrapper[4703]: E0130 12:21:31.976056 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"655e99fe1eacd608561bffbe785ec2376d1022f007050665de9607175d526fce\": container with ID starting with 655e99fe1eacd608561bffbe785ec2376d1022f007050665de9607175d526fce not found: ID does not exist" containerID="655e99fe1eacd608561bffbe785ec2376d1022f007050665de9607175d526fce" Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.976111 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"655e99fe1eacd608561bffbe785ec2376d1022f007050665de9607175d526fce"} err="failed to get container status \"655e99fe1eacd608561bffbe785ec2376d1022f007050665de9607175d526fce\": rpc error: code = NotFound desc = could not find container \"655e99fe1eacd608561bffbe785ec2376d1022f007050665de9607175d526fce\": container with ID starting with 655e99fe1eacd608561bffbe785ec2376d1022f007050665de9607175d526fce not found: ID does not exist" Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.976164 4703 scope.go:117] "RemoveContainer" containerID="653cc576beb0aa27ee6d08fbeb0e44bca965d94c921c6662d2cce93ef06cfefe" Jan 30 12:21:31 crc kubenswrapper[4703]: E0130 12:21:31.976499 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: 
code = NotFound desc = could not find container \"653cc576beb0aa27ee6d08fbeb0e44bca965d94c921c6662d2cce93ef06cfefe\": container with ID starting with 653cc576beb0aa27ee6d08fbeb0e44bca965d94c921c6662d2cce93ef06cfefe not found: ID does not exist" containerID="653cc576beb0aa27ee6d08fbeb0e44bca965d94c921c6662d2cce93ef06cfefe" Jan 30 12:21:31 crc kubenswrapper[4703]: I0130 12:21:31.976547 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"653cc576beb0aa27ee6d08fbeb0e44bca965d94c921c6662d2cce93ef06cfefe"} err="failed to get container status \"653cc576beb0aa27ee6d08fbeb0e44bca965d94c921c6662d2cce93ef06cfefe\": rpc error: code = NotFound desc = could not find container \"653cc576beb0aa27ee6d08fbeb0e44bca965d94c921c6662d2cce93ef06cfefe\": container with ID starting with 653cc576beb0aa27ee6d08fbeb0e44bca965d94c921c6662d2cce93ef06cfefe not found: ID does not exist" Jan 30 12:21:32 crc kubenswrapper[4703]: I0130 12:21:32.103056 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-655b7696db-jp55x"] Jan 30 12:21:32 crc kubenswrapper[4703]: I0130 12:21:32.121034 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-655b7696db-jp55x"] Jan 30 12:21:32 crc kubenswrapper[4703]: I0130 12:21:32.233250 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 30 12:21:32 crc kubenswrapper[4703]: I0130 12:21:32.233611 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/watcher-decision-engine-0" podUID="c3ccea95-1c2e-461a-9fc9-6b0171be170f" containerName="watcher-decision-engine" containerID="cri-o://10872e2658151d3736775d33af2478aa6681e5ac8f132d1c99e4a34d7e0d129f" gracePeriod=30 Jan 30 12:21:33 crc kubenswrapper[4703]: I0130 12:21:33.112483 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c6d5132-9798-418e-92c7-bb0b50b3fd47" path="/var/lib/kubelet/pods/2c6d5132-9798-418e-92c7-bb0b50b3fd47/volumes" Jan 30 12:21:34 crc kubenswrapper[4703]: I0130 12:21:34.109698 4703 generic.go:334] "Generic (PLEG): container finished" podID="62d4e769-8427-473f-8184-be89133bb4bc" containerID="4c41d93f84f1c54e2ae7edb332eb5ae9e18ea295b155d77b1e1f1f76559bfaf4" exitCode=0 Jan 30 12:21:34 crc kubenswrapper[4703]: I0130 12:21:34.110257 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-676db989fc-4rthl" event={"ID":"62d4e769-8427-473f-8184-be89133bb4bc","Type":"ContainerDied","Data":"4c41d93f84f1c54e2ae7edb332eb5ae9e18ea295b155d77b1e1f1f76559bfaf4"} Jan 30 12:21:34 crc kubenswrapper[4703]: I0130 12:21:34.123164 4703 generic.go:334] "Generic (PLEG): container finished" podID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerID="3e0cf78c4445fcdf950205a07640f86e5cdf1d3467b091e47df112bea318af16" exitCode=0 Jan 30 12:21:34 crc kubenswrapper[4703]: I0130 12:21:34.123237 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fe91c0fe-118b-45a0-8e84-cb3691406a11","Type":"ContainerDied","Data":"3e0cf78c4445fcdf950205a07640f86e5cdf1d3467b091e47df112bea318af16"} Jan 30 12:21:34 crc kubenswrapper[4703]: I0130 12:21:34.235784 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:34 crc kubenswrapper[4703]: I0130 12:21:34.237663 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-79994f947f-qcpzj" Jan 30 12:21:34 crc kubenswrapper[4703]: I0130 
12:21:34.878752 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Jan 30 12:21:36 crc kubenswrapper[4703]: I0130 12:21:36.279240 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 30 12:21:36 crc kubenswrapper[4703]: I0130 12:21:36.459074 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-655b7696db-jp55x" podUID="2c6d5132-9798-418e-92c7-bb0b50b3fd47" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.178:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 30 12:21:36 crc kubenswrapper[4703]: I0130 12:21:36.459251 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-655b7696db-jp55x" podUID="2c6d5132-9798-418e-92c7-bb0b50b3fd47" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.178:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 30 12:21:40 crc kubenswrapper[4703]: I0130 12:21:40.232894 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"c3ccea95-1c2e-461a-9fc9-6b0171be170f","Type":"ContainerDied","Data":"10872e2658151d3736775d33af2478aa6681e5ac8f132d1c99e4a34d7e0d129f"} Jan 30 12:21:40 crc kubenswrapper[4703]: I0130 12:21:40.233553 4703 generic.go:334] "Generic (PLEG): container finished" podID="c3ccea95-1c2e-461a-9fc9-6b0171be170f" containerID="10872e2658151d3736775d33af2478aa6681e5ac8f132d1c99e4a34d7e0d129f" exitCode=0 Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.773670 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.849353 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.860403 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-public-tls-certs\") pod \"62d4e769-8427-473f-8184-be89133bb4bc\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.860487 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c3ccea95-1c2e-461a-9fc9-6b0171be170f-custom-prometheus-ca\") pod \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.860526 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-ovndb-tls-certs\") pod \"62d4e769-8427-473f-8184-be89133bb4bc\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.860628 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3ccea95-1c2e-461a-9fc9-6b0171be170f-logs\") pod \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.860660 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-config\") pod \"62d4e769-8427-473f-8184-be89133bb4bc\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.860687 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-internal-tls-certs\") pod \"62d4e769-8427-473f-8184-be89133bb4bc\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.860744 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3ccea95-1c2e-461a-9fc9-6b0171be170f-combined-ca-bundle\") pod \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.862266 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3ccea95-1c2e-461a-9fc9-6b0171be170f-logs" (OuterVolumeSpecName: "logs") pod "c3ccea95-1c2e-461a-9fc9-6b0171be170f" (UID: "c3ccea95-1c2e-461a-9fc9-6b0171be170f"). InnerVolumeSpecName "logs". 
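
Each "Killing container with a grace period" entry in this section (gracePeriod=30 throughout) follows the standard termination protocol: deliver SIGTERM, wait up to the grace period for the process to exit, then escalate to SIGKILL. A minimal sketch of that protocol against a plain OS process, assuming a locally started command rather than a CRI-O container:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// stopWithGrace sends SIGTERM, waits up to gracePeriod for the process to
// exit, then falls back to SIGKILL. Protocol sketch only; the kubelet drives
// this through the CRI, not through os/exec.
func stopWithGrace(cmd *exec.Cmd, gracePeriod time.Duration) error {
	_ = cmd.Process.Signal(syscall.SIGTERM)
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case err := <-done:
		return err // exited within the grace period
	case <-time.After(gracePeriod):
		_ = cmd.Process.Kill() // grace period exhausted: SIGKILL
		return <-done
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	fmt.Println(stopWithGrace(cmd, 2*time.Second))
}

The watcher-decision-engine container above is the well-behaved case: killed at 12:21:32 with a 30-second grace period, it exits on its own (exitCode=0) about eight seconds later, so no SIGKILL is needed.
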
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.860849 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c5rwf\" (UniqueName: \"kubernetes.io/projected/62d4e769-8427-473f-8184-be89133bb4bc-kube-api-access-c5rwf\") pod \"62d4e769-8427-473f-8184-be89133bb4bc\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.868599 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-httpd-config\") pod \"62d4e769-8427-473f-8184-be89133bb4bc\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.868834 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-combined-ca-bundle\") pod \"62d4e769-8427-473f-8184-be89133bb4bc\" (UID: \"62d4e769-8427-473f-8184-be89133bb4bc\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.868906 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3ccea95-1c2e-461a-9fc9-6b0171be170f-config-data\") pod \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.868971 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jx4vn\" (UniqueName: \"kubernetes.io/projected/c3ccea95-1c2e-461a-9fc9-6b0171be170f-kube-api-access-jx4vn\") pod \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\" (UID: \"c3ccea95-1c2e-461a-9fc9-6b0171be170f\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.870193 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3ccea95-1c2e-461a-9fc9-6b0171be170f-logs\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.896225 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3ccea95-1c2e-461a-9fc9-6b0171be170f-kube-api-access-jx4vn" (OuterVolumeSpecName: "kube-api-access-jx4vn") pod "c3ccea95-1c2e-461a-9fc9-6b0171be170f" (UID: "c3ccea95-1c2e-461a-9fc9-6b0171be170f"). InnerVolumeSpecName "kube-api-access-jx4vn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.897893 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "62d4e769-8427-473f-8184-be89133bb4bc" (UID: "62d4e769-8427-473f-8184-be89133bb4bc"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.901228 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62d4e769-8427-473f-8184-be89133bb4bc-kube-api-access-c5rwf" (OuterVolumeSpecName: "kube-api-access-c5rwf") pod "62d4e769-8427-473f-8184-be89133bb4bc" (UID: "62d4e769-8427-473f-8184-be89133bb4bc"). InnerVolumeSpecName "kube-api-access-c5rwf". 
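
The "ContainerStatus from runtime service failed ... code = NotFound" errors logged for the barbican containers at 12:21:31 are benign: by the time the status lookup runs, the container has already been removed, and the kubelet logs the error and moves on rather than failing the cleanup, keeping container removal idempotent. Sketched with a stand-in error value, since the real check goes through gRPC status codes:

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for the CRI "code = NotFound" gRPC status above.
var errNotFound = errors.New("NotFound: container does not exist")

// removeContainer treats a missing container as already removed, so repeated
// deletions (as in the RemoveContainer/DeleteContainer entries above) are
// harmless. Illustrative only.
func removeContainer(del func(id string) error, id string) error {
	if err := del(id); err != nil && !errors.Is(err, errNotFound) {
		return err
	}
	return nil // deleted, or already gone
}

func main() {
	alreadyGone := func(id string) error { return fmt.Errorf("rpc error: %w", errNotFound) }
	fmt.Println(removeContainer(alreadyGone, "655e99fe1eac")) // <nil>
}
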
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.945026 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.971254 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-sg-core-conf-yaml\") pod \"fe91c0fe-118b-45a0-8e84-cb3691406a11\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.971528 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pnd49\" (UniqueName: \"kubernetes.io/projected/fe91c0fe-118b-45a0-8e84-cb3691406a11-kube-api-access-pnd49\") pod \"fe91c0fe-118b-45a0-8e84-cb3691406a11\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.971694 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fe91c0fe-118b-45a0-8e84-cb3691406a11-log-httpd\") pod \"fe91c0fe-118b-45a0-8e84-cb3691406a11\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.971805 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fe91c0fe-118b-45a0-8e84-cb3691406a11-run-httpd\") pod \"fe91c0fe-118b-45a0-8e84-cb3691406a11\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.971912 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-scripts\") pod \"fe91c0fe-118b-45a0-8e84-cb3691406a11\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.971942 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-config-data\") pod \"fe91c0fe-118b-45a0-8e84-cb3691406a11\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.971984 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-combined-ca-bundle\") pod \"fe91c0fe-118b-45a0-8e84-cb3691406a11\" (UID: \"fe91c0fe-118b-45a0-8e84-cb3691406a11\") " Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.972700 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c5rwf\" (UniqueName: \"kubernetes.io/projected/62d4e769-8427-473f-8184-be89133bb4bc-kube-api-access-c5rwf\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.972729 4703 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.972746 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jx4vn\" (UniqueName: \"kubernetes.io/projected/c3ccea95-1c2e-461a-9fc9-6b0171be170f-kube-api-access-jx4vn\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:41 crc kubenswrapper[4703]: 
Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.973831 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe91c0fe-118b-45a0-8e84-cb3691406a11-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "fe91c0fe-118b-45a0-8e84-cb3691406a11" (UID: "fe91c0fe-118b-45a0-8e84-cb3691406a11"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.983862 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe91c0fe-118b-45a0-8e84-cb3691406a11-kube-api-access-pnd49" (OuterVolumeSpecName: "kube-api-access-pnd49") pod "fe91c0fe-118b-45a0-8e84-cb3691406a11" (UID: "fe91c0fe-118b-45a0-8e84-cb3691406a11"). InnerVolumeSpecName "kube-api-access-pnd49". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:21:41 crc kubenswrapper[4703]: I0130 12:21:41.988237 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-scripts" (OuterVolumeSpecName: "scripts") pod "fe91c0fe-118b-45a0-8e84-cb3691406a11" (UID: "fe91c0fe-118b-45a0-8e84-cb3691406a11"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.001915 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3ccea95-1c2e-461a-9fc9-6b0171be170f-custom-prometheus-ca" (OuterVolumeSpecName: "custom-prometheus-ca") pod "c3ccea95-1c2e-461a-9fc9-6b0171be170f" (UID: "c3ccea95-1c2e-461a-9fc9-6b0171be170f"). InnerVolumeSpecName "custom-prometheus-ca". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.013435 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3ccea95-1c2e-461a-9fc9-6b0171be170f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c3ccea95-1c2e-461a-9fc9-6b0171be170f" (UID: "c3ccea95-1c2e-461a-9fc9-6b0171be170f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.034913 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "62d4e769-8427-473f-8184-be89133bb4bc" (UID: "62d4e769-8427-473f-8184-be89133bb4bc"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.050940 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "62d4e769-8427-473f-8184-be89133bb4bc" (UID: "62d4e769-8427-473f-8184-be89133bb4bc"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.060987 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "fe91c0fe-118b-45a0-8e84-cb3691406a11" (UID: "fe91c0fe-118b-45a0-8e84-cb3691406a11"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.063934 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "62d4e769-8427-473f-8184-be89133bb4bc" (UID: "62d4e769-8427-473f-8184-be89133bb4bc"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.064237 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "62d4e769-8427-473f-8184-be89133bb4bc" (UID: "62d4e769-8427-473f-8184-be89133bb4bc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.073552 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-config" (OuterVolumeSpecName: "config") pod "62d4e769-8427-473f-8184-be89133bb4bc" (UID: "62d4e769-8427-473f-8184-be89133bb4bc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.075562 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pnd49\" (UniqueName: \"kubernetes.io/projected/fe91c0fe-118b-45a0-8e84-cb3691406a11-kube-api-access-pnd49\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.075617 4703 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.075631 4703 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fe91c0fe-118b-45a0-8e84-cb3691406a11-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.075645 4703 reconciler_common.go:293] "Volume detached for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/c3ccea95-1c2e-461a-9fc9-6b0171be170f-custom-prometheus-ca\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.075685 4703 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.075698 4703 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fe91c0fe-118b-45a0-8e84-cb3691406a11-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.075760 4703 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.075777 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.075789 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3ccea95-1c2e-461a-9fc9-6b0171be170f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.075802 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.075859 4703 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.075874 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62d4e769-8427-473f-8184-be89133bb4bc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.084180 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3ccea95-1c2e-461a-9fc9-6b0171be170f-config-data" (OuterVolumeSpecName: "config-data") pod "c3ccea95-1c2e-461a-9fc9-6b0171be170f" (UID: "c3ccea95-1c2e-461a-9fc9-6b0171be170f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.136146 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fe91c0fe-118b-45a0-8e84-cb3691406a11" (UID: "fe91c0fe-118b-45a0-8e84-cb3691406a11"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.143350 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-config-data" (OuterVolumeSpecName: "config-data") pod "fe91c0fe-118b-45a0-8e84-cb3691406a11" (UID: "fe91c0fe-118b-45a0-8e84-cb3691406a11"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.180119 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3ccea95-1c2e-461a-9fc9-6b0171be170f-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.180637 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.180840 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe91c0fe-118b-45a0-8e84-cb3691406a11-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.271618 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"64e4eca7-b75f-4ac6-ba29-f017d5aec24e","Type":"ContainerStarted","Data":"5f9748c8e76c0f3204c8eac78d83e9d330b8e167b165c550d631e59bbb639090"} Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.275995 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"c3ccea95-1c2e-461a-9fc9-6b0171be170f","Type":"ContainerDied","Data":"002a39559e805060a86bd47867a39eb47bf978c9607df987d9a5ed7bf160a38e"} Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.276082 4703 scope.go:117] "RemoveContainer" containerID="10872e2658151d3736775d33af2478aa6681e5ac8f132d1c99e4a34d7e0d129f" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.276868 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.281258 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-676db989fc-4rthl" event={"ID":"62d4e769-8427-473f-8184-be89133bb4bc","Type":"ContainerDied","Data":"eb09ba8e596f8e819a7d62c2369dfaa1aaee80e315637fd5a5b3d58a38602ff3"} Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.281646 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-676db989fc-4rthl" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.287806 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fe91c0fe-118b-45a0-8e84-cb3691406a11","Type":"ContainerDied","Data":"7eb0bbbf215e94763a281f5d742c7ec0492ed6b58754391933e70a37f14f0257"} Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.288044 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.321747 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.620778799 podStartE2EDuration="31.321715224s" podCreationTimestamp="2026-01-30 12:21:11 +0000 UTC" firstStartedPulling="2026-01-30 12:21:13.818937659 +0000 UTC m=+1509.596759313" lastFinishedPulling="2026-01-30 12:21:41.519874084 +0000 UTC m=+1537.297695738" observedRunningTime="2026-01-30 12:21:42.319584778 +0000 UTC m=+1538.097406442" watchObservedRunningTime="2026-01-30 12:21:42.321715224 +0000 UTC m=+1538.099536888" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.324575 4703 scope.go:117] "RemoveContainer" containerID="a598bc6d257190579fae80d1f2fe82b84ac0ae7ac2e04a7cb34954a853b0fd27" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.394871 4703 scope.go:117] "RemoveContainer" containerID="4c41d93f84f1c54e2ae7edb332eb5ae9e18ea295b155d77b1e1f1f76559bfaf4" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.395180 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.413813 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.424849 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-676db989fc-4rthl"] Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.436486 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-676db989fc-4rthl"] Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.451150 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 30 12:21:42 crc kubenswrapper[4703]: E0130 12:21:42.451984 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerName="ceilometer-central-agent" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.452004 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerName="ceilometer-central-agent" Jan 30 12:21:42 crc kubenswrapper[4703]: E0130 12:21:42.452013 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerName="sg-core" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.452019 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerName="sg-core" Jan 30 12:21:42 crc kubenswrapper[4703]: E0130 12:21:42.452041 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c6d5132-9798-418e-92c7-bb0b50b3fd47" containerName="barbican-api" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.452050 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c6d5132-9798-418e-92c7-bb0b50b3fd47" containerName="barbican-api" Jan 30 12:21:42 crc kubenswrapper[4703]: E0130 12:21:42.452064 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62d4e769-8427-473f-8184-be89133bb4bc" containerName="neutron-httpd" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.452069 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="62d4e769-8427-473f-8184-be89133bb4bc" containerName="neutron-httpd" Jan 30 12:21:42 crc kubenswrapper[4703]: E0130 12:21:42.452083 4703 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="c3ccea95-1c2e-461a-9fc9-6b0171be170f" containerName="watcher-decision-engine" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.452089 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3ccea95-1c2e-461a-9fc9-6b0171be170f" containerName="watcher-decision-engine" Jan 30 12:21:42 crc kubenswrapper[4703]: E0130 12:21:42.452099 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerName="proxy-httpd" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.452105 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerName="proxy-httpd" Jan 30 12:21:42 crc kubenswrapper[4703]: E0130 12:21:42.452155 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c6d5132-9798-418e-92c7-bb0b50b3fd47" containerName="barbican-api-log" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.452163 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c6d5132-9798-418e-92c7-bb0b50b3fd47" containerName="barbican-api-log" Jan 30 12:21:42 crc kubenswrapper[4703]: E0130 12:21:42.452180 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62d4e769-8427-473f-8184-be89133bb4bc" containerName="neutron-api" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.452188 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="62d4e769-8427-473f-8184-be89133bb4bc" containerName="neutron-api" Jan 30 12:21:42 crc kubenswrapper[4703]: E0130 12:21:42.452206 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerName="ceilometer-notification-agent" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.452214 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerName="ceilometer-notification-agent" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.452449 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="62d4e769-8427-473f-8184-be89133bb4bc" containerName="neutron-api" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.452465 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="62d4e769-8427-473f-8184-be89133bb4bc" containerName="neutron-httpd" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.452474 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerName="proxy-httpd" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.452490 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerName="sg-core" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.452519 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe91c0fe-118b-45a0-8e84-cb3691406a11" containerName="ceilometer-central-agent" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.452534 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c6d5132-9798-418e-92c7-bb0b50b3fd47" containerName="barbican-api-log" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.452551 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c6d5132-9798-418e-92c7-bb0b50b3fd47" containerName="barbican-api" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.452567 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3ccea95-1c2e-461a-9fc9-6b0171be170f" containerName="watcher-decision-engine" Jan 30 12:21:42 crc kubenswrapper[4703]: 
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.453595 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0"
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.459046 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"watcher-decision-engine-config-data"
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.477399 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.486977 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.491182 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a03fc62c-0c38-4b37-b568-661a611eaadd-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"a03fc62c-0c38-4b37-b568-661a611eaadd\") " pod="openstack/watcher-decision-engine-0"
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.491246 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a03fc62c-0c38-4b37-b568-661a611eaadd-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"a03fc62c-0c38-4b37-b568-661a611eaadd\") " pod="openstack/watcher-decision-engine-0"
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.491278 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a03fc62c-0c38-4b37-b568-661a611eaadd-config-data\") pod \"watcher-decision-engine-0\" (UID: \"a03fc62c-0c38-4b37-b568-661a611eaadd\") " pod="openstack/watcher-decision-engine-0"
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.491357 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a03fc62c-0c38-4b37-b568-661a611eaadd-logs\") pod \"watcher-decision-engine-0\" (UID: \"a03fc62c-0c38-4b37-b568-661a611eaadd\") " pod="openstack/watcher-decision-engine-0"
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.491381 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmb8d\" (UniqueName: \"kubernetes.io/projected/a03fc62c-0c38-4b37-b568-661a611eaadd-kube-api-access-zmb8d\") pod \"watcher-decision-engine-0\" (UID: \"a03fc62c-0c38-4b37-b568-661a611eaadd\") " pod="openstack/watcher-decision-engine-0"
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.508094 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"]
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.520740 4703 scope.go:117] "RemoveContainer" containerID="89ee06fee150532b9c98b0751dc0ad5f71c7b806b29a356ab56e67a4ef77ff14"
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.526649 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.531246 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.534899 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.536608 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.555980 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.558318 4703 scope.go:117] "RemoveContainer" containerID="068ba57f0ae3f793336fd962e58b2d5696abbf4db0fd345a8329fbf826843e6e" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.665717 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a03fc62c-0c38-4b37-b568-661a611eaadd-logs\") pod \"watcher-decision-engine-0\" (UID: \"a03fc62c-0c38-4b37-b568-661a611eaadd\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.666069 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmb8d\" (UniqueName: \"kubernetes.io/projected/a03fc62c-0c38-4b37-b568-661a611eaadd-kube-api-access-zmb8d\") pod \"watcher-decision-engine-0\" (UID: \"a03fc62c-0c38-4b37-b568-661a611eaadd\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.666282 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb63b182-d322-4146-a62d-7a918fa5ad61-log-httpd\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.666825 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a03fc62c-0c38-4b37-b568-661a611eaadd-logs\") pod \"watcher-decision-engine-0\" (UID: \"a03fc62c-0c38-4b37-b568-661a611eaadd\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.667252 4703 scope.go:117] "RemoveContainer" containerID="f53922975bea1b467c263a9b216c9f77e0f57a7caca10a1d2e7139e57386799d" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.668618 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.668815 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.669552 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-config-data\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.669834 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a03fc62c-0c38-4b37-b568-661a611eaadd-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"a03fc62c-0c38-4b37-b568-661a611eaadd\") " pod="openstack/watcher-decision-engine-0"
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.669923 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a03fc62c-0c38-4b37-b568-661a611eaadd-config-data\") pod \"watcher-decision-engine-0\" (UID: \"a03fc62c-0c38-4b37-b568-661a611eaadd\") " pod="openstack/watcher-decision-engine-0"
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.670019 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vs2km\" (UniqueName: \"kubernetes.io/projected/bb63b182-d322-4146-a62d-7a918fa5ad61-kube-api-access-vs2km\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0"
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.670141 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-scripts\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0"
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.670356 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb63b182-d322-4146-a62d-7a918fa5ad61-run-httpd\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0"
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.674838 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a03fc62c-0c38-4b37-b568-661a611eaadd-combined-ca-bundle\") pod \"watcher-decision-engine-0\" (UID: \"a03fc62c-0c38-4b37-b568-661a611eaadd\") " pod="openstack/watcher-decision-engine-0"
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.676283 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"custom-prometheus-ca\" (UniqueName: \"kubernetes.io/secret/a03fc62c-0c38-4b37-b568-661a611eaadd-custom-prometheus-ca\") pod \"watcher-decision-engine-0\" (UID: \"a03fc62c-0c38-4b37-b568-661a611eaadd\") " pod="openstack/watcher-decision-engine-0"
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.678934 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a03fc62c-0c38-4b37-b568-661a611eaadd-config-data\") pod \"watcher-decision-engine-0\" (UID: \"a03fc62c-0c38-4b37-b568-661a611eaadd\") " pod="openstack/watcher-decision-engine-0"
Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.693826 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmb8d\" (UniqueName: \"kubernetes.io/projected/a03fc62c-0c38-4b37-b568-661a611eaadd-kube-api-access-zmb8d\") pod \"watcher-decision-engine-0\" (UID: \"a03fc62c-0c38-4b37-b568-661a611eaadd\") " pod="openstack/watcher-decision-engine-0"
\"kubernetes.io/projected/a03fc62c-0c38-4b37-b568-661a611eaadd-kube-api-access-zmb8d\") pod \"watcher-decision-engine-0\" (UID: \"a03fc62c-0c38-4b37-b568-661a611eaadd\") " pod="openstack/watcher-decision-engine-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.773445 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.773555 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-config-data\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.773613 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vs2km\" (UniqueName: \"kubernetes.io/projected/bb63b182-d322-4146-a62d-7a918fa5ad61-kube-api-access-vs2km\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.773636 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-scripts\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.773678 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb63b182-d322-4146-a62d-7a918fa5ad61-run-httpd\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.774224 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb63b182-d322-4146-a62d-7a918fa5ad61-log-httpd\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.774267 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.774463 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb63b182-d322-4146-a62d-7a918fa5ad61-run-httpd\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.774655 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb63b182-d322-4146-a62d-7a918fa5ad61-log-httpd\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.778959 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.780189 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.781625 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-config-data\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.785913 4703 scope.go:117] "RemoveContainer" containerID="3e0cf78c4445fcdf950205a07640f86e5cdf1d3467b091e47df112bea318af16" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.786351 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-scripts\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.795441 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vs2km\" (UniqueName: \"kubernetes.io/projected/bb63b182-d322-4146-a62d-7a918fa5ad61-kube-api-access-vs2km\") pod \"ceilometer-0\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " pod="openstack/ceilometer-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.827000 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/watcher-decision-engine-0" Jan 30 12:21:42 crc kubenswrapper[4703]: I0130 12:21:42.863589 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:21:43 crc kubenswrapper[4703]: I0130 12:21:43.099752 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62d4e769-8427-473f-8184-be89133bb4bc" path="/var/lib/kubelet/pods/62d4e769-8427-473f-8184-be89133bb4bc/volumes" Jan 30 12:21:43 crc kubenswrapper[4703]: I0130 12:21:43.101687 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3ccea95-1c2e-461a-9fc9-6b0171be170f" path="/var/lib/kubelet/pods/c3ccea95-1c2e-461a-9fc9-6b0171be170f/volumes" Jan 30 12:21:43 crc kubenswrapper[4703]: I0130 12:21:43.102320 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe91c0fe-118b-45a0-8e84-cb3691406a11" path="/var/lib/kubelet/pods/fe91c0fe-118b-45a0-8e84-cb3691406a11/volumes" Jan 30 12:21:43 crc kubenswrapper[4703]: I0130 12:21:43.379156 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/watcher-decision-engine-0"] Jan 30 12:21:43 crc kubenswrapper[4703]: I0130 12:21:43.537606 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:21:44 crc kubenswrapper[4703]: I0130 12:21:44.341672 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"a03fc62c-0c38-4b37-b568-661a611eaadd","Type":"ContainerStarted","Data":"5a701aab499cc91b8f2b70679299c29d55117bc1dcec72ccd1fc5402b2600c3a"} Jan 30 12:21:44 crc kubenswrapper[4703]: I0130 12:21:44.342243 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/watcher-decision-engine-0" event={"ID":"a03fc62c-0c38-4b37-b568-661a611eaadd","Type":"ContainerStarted","Data":"c0f79a173ff014c390635c152085678e546db3d2a1589ca446067ffbac0f140f"} Jan 30 12:21:44 crc kubenswrapper[4703]: I0130 12:21:44.344560 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb63b182-d322-4146-a62d-7a918fa5ad61","Type":"ContainerStarted","Data":"08c418b36146fb180e1ae5350db6f861f21cc15a2e03dc2d5e47a615ba82bf8f"} Jan 30 12:21:44 crc kubenswrapper[4703]: I0130 12:21:44.344610 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb63b182-d322-4146-a62d-7a918fa5ad61","Type":"ContainerStarted","Data":"353c8b6e4c7b3529e6a0050c62b854a1e5927fd7cd165c86be6b17a85df06f1b"} Jan 30 12:21:44 crc kubenswrapper[4703]: I0130 12:21:44.366715 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/watcher-decision-engine-0" podStartSLOduration=2.366697735 podStartE2EDuration="2.366697735s" podCreationTimestamp="2026-01-30 12:21:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:21:44.363761668 +0000 UTC m=+1540.141583322" watchObservedRunningTime="2026-01-30 12:21:44.366697735 +0000 UTC m=+1540.144519389" Jan 30 12:21:45 crc kubenswrapper[4703]: I0130 12:21:45.364912 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb63b182-d322-4146-a62d-7a918fa5ad61","Type":"ContainerStarted","Data":"014dde91f8eff00112c137311207bb1865572820e51fa8bcfe2fbebec10573b4"} Jan 30 12:21:45 crc kubenswrapper[4703]: I0130 12:21:45.396278 4703 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod26df44bd-bd05-4ed3-b146-fa1111db982e"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod26df44bd-bd05-4ed3-b146-fa1111db982e] : Timed out while waiting for 
Jan 30 12:21:46 crc kubenswrapper[4703]: I0130 12:21:46.099230 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 30 12:21:46 crc kubenswrapper[4703]: I0130 12:21:46.382173 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb63b182-d322-4146-a62d-7a918fa5ad61","Type":"ContainerStarted","Data":"a49e31f3846e4eb083445f41149b614c0c2c3ca4b9985a8acc620ae47fe47d31"}
Jan 30 12:21:49 crc kubenswrapper[4703]: I0130 12:21:49.417535 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb63b182-d322-4146-a62d-7a918fa5ad61","Type":"ContainerStarted","Data":"532482240e6cb7e93b40611bb49e68629fe8d076d618c4d9cdec227adc981e94"}
Jan 30 12:21:49 crc kubenswrapper[4703]: I0130 12:21:49.418166 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 30 12:21:49 crc kubenswrapper[4703]: I0130 12:21:49.417821 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerName="sg-core" containerID="cri-o://a49e31f3846e4eb083445f41149b614c0c2c3ca4b9985a8acc620ae47fe47d31" gracePeriod=30
Jan 30 12:21:49 crc kubenswrapper[4703]: I0130 12:21:49.417752 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerName="ceilometer-central-agent" containerID="cri-o://08c418b36146fb180e1ae5350db6f861f21cc15a2e03dc2d5e47a615ba82bf8f" gracePeriod=30
Jan 30 12:21:49 crc kubenswrapper[4703]: I0130 12:21:49.417832 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerName="proxy-httpd" containerID="cri-o://532482240e6cb7e93b40611bb49e68629fe8d076d618c4d9cdec227adc981e94" gracePeriod=30
Jan 30 12:21:49 crc kubenswrapper[4703]: I0130 12:21:49.417939 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerName="ceilometer-notification-agent" containerID="cri-o://014dde91f8eff00112c137311207bb1865572820e51fa8bcfe2fbebec10573b4" gracePeriod=30
Jan 30 12:21:49 crc kubenswrapper[4703]: I0130 12:21:49.465954 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.822908005 podStartE2EDuration="7.465928005s" podCreationTimestamp="2026-01-30 12:21:42 +0000 UTC" firstStartedPulling="2026-01-30 12:21:43.551408927 +0000 UTC m=+1539.329230581" lastFinishedPulling="2026-01-30 12:21:48.194428927 +0000 UTC m=+1543.972250581" observedRunningTime="2026-01-30 12:21:49.457818305 +0000 UTC m=+1545.235639959" watchObservedRunningTime="2026-01-30 12:21:49.465928005 +0000 UTC m=+1545.243749659"
Jan 30 12:21:50 crc kubenswrapper[4703]: I0130 12:21:50.555027 4703 generic.go:334] "Generic (PLEG): container finished" podID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerID="532482240e6cb7e93b40611bb49e68629fe8d076d618c4d9cdec227adc981e94" exitCode=0
Jan 30 12:21:50 crc kubenswrapper[4703]: I0130 12:21:50.555551 4703 generic.go:334] "Generic (PLEG): container finished" podID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerID="a49e31f3846e4eb083445f41149b614c0c2c3ca4b9985a8acc620ae47fe47d31" exitCode=2
Jan 30 12:21:50 crc kubenswrapper[4703]: I0130 12:21:50.555562 4703 generic.go:334] "Generic (PLEG): container finished" podID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerID="014dde91f8eff00112c137311207bb1865572820e51fa8bcfe2fbebec10573b4" exitCode=0
Jan 30 12:21:50 crc kubenswrapper[4703]: I0130 12:21:50.555113 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb63b182-d322-4146-a62d-7a918fa5ad61","Type":"ContainerDied","Data":"532482240e6cb7e93b40611bb49e68629fe8d076d618c4d9cdec227adc981e94"}
Jan 30 12:21:50 crc kubenswrapper[4703]: I0130 12:21:50.555650 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb63b182-d322-4146-a62d-7a918fa5ad61","Type":"ContainerDied","Data":"a49e31f3846e4eb083445f41149b614c0c2c3ca4b9985a8acc620ae47fe47d31"}
Jan 30 12:21:50 crc kubenswrapper[4703]: I0130 12:21:50.555669 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb63b182-d322-4146-a62d-7a918fa5ad61","Type":"ContainerDied","Data":"014dde91f8eff00112c137311207bb1865572820e51fa8bcfe2fbebec10573b4"}
Jan 30 12:21:51 crc kubenswrapper[4703]: I0130 12:21:51.415972 4703 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod572a81eb-68df-470a-9ca8-1febfc6167ad"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod572a81eb-68df-470a-9ca8-1febfc6167ad] : Timed out while waiting for systemd to remove kubepods-besteffort-pod572a81eb_68df_470a_9ca8_1febfc6167ad.slice"
Jan 30 12:21:52 crc kubenswrapper[4703]: I0130 12:21:52.827945 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/watcher-decision-engine-0"
Jan 30 12:21:52 crc kubenswrapper[4703]: I0130 12:21:52.865884 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/watcher-decision-engine-0"
Jan 30 12:21:53 crc kubenswrapper[4703]: I0130 12:21:53.592726 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/watcher-decision-engine-0"
Jan 30 12:21:53 crc kubenswrapper[4703]: I0130 12:21:53.632272 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/watcher-decision-engine-0"
Jan 30 12:21:58 crc kubenswrapper[4703]: I0130 12:21:58.860874 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-x8fjs"]
Jan 30 12:21:58 crc kubenswrapper[4703]: I0130 12:21:58.863523 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-x8fjs"
Jan 30 12:21:58 crc kubenswrapper[4703]: I0130 12:21:58.880573 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-x8fjs"]
Jan 30 12:21:58 crc kubenswrapper[4703]: I0130 12:21:58.962608 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-hczbb"]
Jan 30 12:21:58 crc kubenswrapper[4703]: I0130 12:21:58.965585 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-hczbb"
Need to start a new one" pod="openstack/nova-cell0-db-create-hczbb" Jan 30 12:21:58 crc kubenswrapper[4703]: I0130 12:21:58.986982 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-hczbb"] Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:58.989049 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f2ddfec-e63d-4fbe-9c90-f9131bf80969-operator-scripts\") pod \"nova-api-db-create-x8fjs\" (UID: \"2f2ddfec-e63d-4fbe-9c90-f9131bf80969\") " pod="openstack/nova-api-db-create-x8fjs" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.024187 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bms8r\" (UniqueName: \"kubernetes.io/projected/2f2ddfec-e63d-4fbe-9c90-f9131bf80969-kube-api-access-bms8r\") pod \"nova-api-db-create-x8fjs\" (UID: \"2f2ddfec-e63d-4fbe-9c90-f9131bf80969\") " pod="openstack/nova-api-db-create-x8fjs" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.101142 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-7e42-account-create-update-skmfk"] Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.105116 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7e42-account-create-update-skmfk" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.113730 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.127101 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f2ddfec-e63d-4fbe-9c90-f9131bf80969-operator-scripts\") pod \"nova-api-db-create-x8fjs\" (UID: \"2f2ddfec-e63d-4fbe-9c90-f9131bf80969\") " pod="openstack/nova-api-db-create-x8fjs" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.127253 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82de3756-5f83-4cc6-9afd-8b359a2561f7-operator-scripts\") pod \"nova-cell0-db-create-hczbb\" (UID: \"82de3756-5f83-4cc6-9afd-8b359a2561f7\") " pod="openstack/nova-cell0-db-create-hczbb" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.127453 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bms8r\" (UniqueName: \"kubernetes.io/projected/2f2ddfec-e63d-4fbe-9c90-f9131bf80969-kube-api-access-bms8r\") pod \"nova-api-db-create-x8fjs\" (UID: \"2f2ddfec-e63d-4fbe-9c90-f9131bf80969\") " pod="openstack/nova-api-db-create-x8fjs" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.127652 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p67nk\" (UniqueName: \"kubernetes.io/projected/82de3756-5f83-4cc6-9afd-8b359a2561f7-kube-api-access-p67nk\") pod \"nova-cell0-db-create-hczbb\" (UID: \"82de3756-5f83-4cc6-9afd-8b359a2561f7\") " pod="openstack/nova-cell0-db-create-hczbb" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.128476 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f2ddfec-e63d-4fbe-9c90-f9131bf80969-operator-scripts\") pod \"nova-api-db-create-x8fjs\" (UID: \"2f2ddfec-e63d-4fbe-9c90-f9131bf80969\") " 
pod="openstack/nova-api-db-create-x8fjs" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.143498 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-2jd7h"] Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.167817 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-2jd7h" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.174172 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-7e42-account-create-update-skmfk"] Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.180021 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bms8r\" (UniqueName: \"kubernetes.io/projected/2f2ddfec-e63d-4fbe-9c90-f9131bf80969-kube-api-access-bms8r\") pod \"nova-api-db-create-x8fjs\" (UID: \"2f2ddfec-e63d-4fbe-9c90-f9131bf80969\") " pod="openstack/nova-api-db-create-x8fjs" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.194159 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-x8fjs" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.202474 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-2jd7h"] Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.231419 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7998bb0-fbe6-46ad-bf20-227ac24143c8-operator-scripts\") pod \"nova-api-7e42-account-create-update-skmfk\" (UID: \"a7998bb0-fbe6-46ad-bf20-227ac24143c8\") " pod="openstack/nova-api-7e42-account-create-update-skmfk" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.231478 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfzpg\" (UniqueName: \"kubernetes.io/projected/a7998bb0-fbe6-46ad-bf20-227ac24143c8-kube-api-access-zfzpg\") pod \"nova-api-7e42-account-create-update-skmfk\" (UID: \"a7998bb0-fbe6-46ad-bf20-227ac24143c8\") " pod="openstack/nova-api-7e42-account-create-update-skmfk" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.231539 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82de3756-5f83-4cc6-9afd-8b359a2561f7-operator-scripts\") pod \"nova-cell0-db-create-hczbb\" (UID: \"82de3756-5f83-4cc6-9afd-8b359a2561f7\") " pod="openstack/nova-cell0-db-create-hczbb" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.231686 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p67nk\" (UniqueName: \"kubernetes.io/projected/82de3756-5f83-4cc6-9afd-8b359a2561f7-kube-api-access-p67nk\") pod \"nova-cell0-db-create-hczbb\" (UID: \"82de3756-5f83-4cc6-9afd-8b359a2561f7\") " pod="openstack/nova-cell0-db-create-hczbb" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.252871 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82de3756-5f83-4cc6-9afd-8b359a2561f7-operator-scripts\") pod \"nova-cell0-db-create-hczbb\" (UID: \"82de3756-5f83-4cc6-9afd-8b359a2561f7\") " pod="openstack/nova-cell0-db-create-hczbb" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.286598 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p67nk\" (UniqueName: 
\"kubernetes.io/projected/82de3756-5f83-4cc6-9afd-8b359a2561f7-kube-api-access-p67nk\") pod \"nova-cell0-db-create-hczbb\" (UID: \"82de3756-5f83-4cc6-9afd-8b359a2561f7\") " pod="openstack/nova-cell0-db-create-hczbb" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.304616 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-7935-account-create-update-tpmdh"] Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.306670 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-7935-account-create-update-tpmdh" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.308268 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-hczbb" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.327780 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.336685 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hhtf\" (UniqueName: \"kubernetes.io/projected/8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1-kube-api-access-7hhtf\") pod \"nova-cell1-db-create-2jd7h\" (UID: \"8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1\") " pod="openstack/nova-cell1-db-create-2jd7h" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.336804 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1-operator-scripts\") pod \"nova-cell1-db-create-2jd7h\" (UID: \"8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1\") " pod="openstack/nova-cell1-db-create-2jd7h" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.336884 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7998bb0-fbe6-46ad-bf20-227ac24143c8-operator-scripts\") pod \"nova-api-7e42-account-create-update-skmfk\" (UID: \"a7998bb0-fbe6-46ad-bf20-227ac24143c8\") " pod="openstack/nova-api-7e42-account-create-update-skmfk" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.336913 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfzpg\" (UniqueName: \"kubernetes.io/projected/a7998bb0-fbe6-46ad-bf20-227ac24143c8-kube-api-access-zfzpg\") pod \"nova-api-7e42-account-create-update-skmfk\" (UID: \"a7998bb0-fbe6-46ad-bf20-227ac24143c8\") " pod="openstack/nova-api-7e42-account-create-update-skmfk" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.338782 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7998bb0-fbe6-46ad-bf20-227ac24143c8-operator-scripts\") pod \"nova-api-7e42-account-create-update-skmfk\" (UID: \"a7998bb0-fbe6-46ad-bf20-227ac24143c8\") " pod="openstack/nova-api-7e42-account-create-update-skmfk" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.348790 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-7935-account-create-update-tpmdh"] Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.368294 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfzpg\" (UniqueName: \"kubernetes.io/projected/a7998bb0-fbe6-46ad-bf20-227ac24143c8-kube-api-access-zfzpg\") pod \"nova-api-7e42-account-create-update-skmfk\" (UID: 
\"a7998bb0-fbe6-46ad-bf20-227ac24143c8\") " pod="openstack/nova-api-7e42-account-create-update-skmfk" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.387176 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-7fec-account-create-update-zft2k"] Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.390795 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-7fec-account-create-update-zft2k" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.394422 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.401869 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-7fec-account-create-update-zft2k"] Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.444633 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1-operator-scripts\") pod \"nova-cell1-db-create-2jd7h\" (UID: \"8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1\") " pod="openstack/nova-cell1-db-create-2jd7h" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.444735 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dzmb\" (UniqueName: \"kubernetes.io/projected/bc0160f9-a98b-4839-b10d-a23168a56417-kube-api-access-9dzmb\") pod \"nova-cell0-7935-account-create-update-tpmdh\" (UID: \"bc0160f9-a98b-4839-b10d-a23168a56417\") " pod="openstack/nova-cell0-7935-account-create-update-tpmdh" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.444898 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc0160f9-a98b-4839-b10d-a23168a56417-operator-scripts\") pod \"nova-cell0-7935-account-create-update-tpmdh\" (UID: \"bc0160f9-a98b-4839-b10d-a23168a56417\") " pod="openstack/nova-cell0-7935-account-create-update-tpmdh" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.445059 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hhtf\" (UniqueName: \"kubernetes.io/projected/8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1-kube-api-access-7hhtf\") pod \"nova-cell1-db-create-2jd7h\" (UID: \"8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1\") " pod="openstack/nova-cell1-db-create-2jd7h" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.446651 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1-operator-scripts\") pod \"nova-cell1-db-create-2jd7h\" (UID: \"8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1\") " pod="openstack/nova-cell1-db-create-2jd7h" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.452146 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-7e42-account-create-update-skmfk" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.596411 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hhtf\" (UniqueName: \"kubernetes.io/projected/8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1-kube-api-access-7hhtf\") pod \"nova-cell1-db-create-2jd7h\" (UID: \"8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1\") " pod="openstack/nova-cell1-db-create-2jd7h" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.600085 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dzmb\" (UniqueName: \"kubernetes.io/projected/bc0160f9-a98b-4839-b10d-a23168a56417-kube-api-access-9dzmb\") pod \"nova-cell0-7935-account-create-update-tpmdh\" (UID: \"bc0160f9-a98b-4839-b10d-a23168a56417\") " pod="openstack/nova-cell0-7935-account-create-update-tpmdh" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.600310 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrsnd\" (UniqueName: \"kubernetes.io/projected/2d299026-7d0b-43f0-8f66-269e434576a9-kube-api-access-lrsnd\") pod \"nova-cell1-7fec-account-create-update-zft2k\" (UID: \"2d299026-7d0b-43f0-8f66-269e434576a9\") " pod="openstack/nova-cell1-7fec-account-create-update-zft2k" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.606474 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d299026-7d0b-43f0-8f66-269e434576a9-operator-scripts\") pod \"nova-cell1-7fec-account-create-update-zft2k\" (UID: \"2d299026-7d0b-43f0-8f66-269e434576a9\") " pod="openstack/nova-cell1-7fec-account-create-update-zft2k" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.606891 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc0160f9-a98b-4839-b10d-a23168a56417-operator-scripts\") pod \"nova-cell0-7935-account-create-update-tpmdh\" (UID: \"bc0160f9-a98b-4839-b10d-a23168a56417\") " pod="openstack/nova-cell0-7935-account-create-update-tpmdh" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.608584 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc0160f9-a98b-4839-b10d-a23168a56417-operator-scripts\") pod \"nova-cell0-7935-account-create-update-tpmdh\" (UID: \"bc0160f9-a98b-4839-b10d-a23168a56417\") " pod="openstack/nova-cell0-7935-account-create-update-tpmdh" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.635976 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dzmb\" (UniqueName: \"kubernetes.io/projected/bc0160f9-a98b-4839-b10d-a23168a56417-kube-api-access-9dzmb\") pod \"nova-cell0-7935-account-create-update-tpmdh\" (UID: \"bc0160f9-a98b-4839-b10d-a23168a56417\") " pod="openstack/nova-cell0-7935-account-create-update-tpmdh" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.644664 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-2jd7h" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.683963 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-7935-account-create-update-tpmdh" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.711414 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrsnd\" (UniqueName: \"kubernetes.io/projected/2d299026-7d0b-43f0-8f66-269e434576a9-kube-api-access-lrsnd\") pod \"nova-cell1-7fec-account-create-update-zft2k\" (UID: \"2d299026-7d0b-43f0-8f66-269e434576a9\") " pod="openstack/nova-cell1-7fec-account-create-update-zft2k" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.712008 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d299026-7d0b-43f0-8f66-269e434576a9-operator-scripts\") pod \"nova-cell1-7fec-account-create-update-zft2k\" (UID: \"2d299026-7d0b-43f0-8f66-269e434576a9\") " pod="openstack/nova-cell1-7fec-account-create-update-zft2k" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.715455 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d299026-7d0b-43f0-8f66-269e434576a9-operator-scripts\") pod \"nova-cell1-7fec-account-create-update-zft2k\" (UID: \"2d299026-7d0b-43f0-8f66-269e434576a9\") " pod="openstack/nova-cell1-7fec-account-create-update-zft2k" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.743707 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrsnd\" (UniqueName: \"kubernetes.io/projected/2d299026-7d0b-43f0-8f66-269e434576a9-kube-api-access-lrsnd\") pod \"nova-cell1-7fec-account-create-update-zft2k\" (UID: \"2d299026-7d0b-43f0-8f66-269e434576a9\") " pod="openstack/nova-cell1-7fec-account-create-update-zft2k" Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.921109 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.921520 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="3d5e864b-4ad4-447d-8b20-6d9999cea7bb" containerName="glance-log" containerID="cri-o://5e2a67c6b92263c56ecd8e9554ff8f505f0e8f53d8344a3d84e6d9aa54ce988e" gracePeriod=30 Jan 30 12:21:59 crc kubenswrapper[4703]: I0130 12:21:59.922435 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="3d5e864b-4ad4-447d-8b20-6d9999cea7bb" containerName="glance-httpd" containerID="cri-o://598a787f498683049fc201ad26000b02a7f839024e16b91d30c5f557e8ea1365" gracePeriod=30 Jan 30 12:22:00 crc kubenswrapper[4703]: I0130 12:22:00.022211 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-7fec-account-create-update-zft2k" Jan 30 12:22:00 crc kubenswrapper[4703]: I0130 12:22:00.396065 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-x8fjs"] Jan 30 12:22:00 crc kubenswrapper[4703]: I0130 12:22:00.503391 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-2jd7h"] Jan 30 12:22:00 crc kubenswrapper[4703]: I0130 12:22:00.517833 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-hczbb"] Jan 30 12:22:00 crc kubenswrapper[4703]: I0130 12:22:00.725004 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-7935-account-create-update-tpmdh"] Jan 30 12:22:00 crc kubenswrapper[4703]: I0130 12:22:00.743726 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-7e42-account-create-update-skmfk"] Jan 30 12:22:00 crc kubenswrapper[4703]: I0130 12:22:00.762403 4703 generic.go:334] "Generic (PLEG): container finished" podID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerID="d757dd822a89b950ce6e9d4dc97199e2572ee94ccf2beca2ecace35453f877f7" exitCode=137 Jan 30 12:22:00 crc kubenswrapper[4703]: I0130 12:22:00.762611 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5f9958979d-8h859" event={"ID":"b888ea51-970d-4f4d-9e5c-f456ca173472","Type":"ContainerDied","Data":"d757dd822a89b950ce6e9d4dc97199e2572ee94ccf2beca2ecace35453f877f7"} Jan 30 12:22:00 crc kubenswrapper[4703]: I0130 12:22:00.762662 4703 scope.go:117] "RemoveContainer" containerID="44bd0eeab3c7b4371ebc41e5f7618026824b17804f7a8c0f1225abd278d6ca11" Jan 30 12:22:00 crc kubenswrapper[4703]: W0130 12:22:00.776418 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbc0160f9_a98b_4839_b10d_a23168a56417.slice/crio-ee12a1507aabba2600ec1a443ec65abe78ef4e44bd70b09df18d6bddd7487417 WatchSource:0}: Error finding container ee12a1507aabba2600ec1a443ec65abe78ef4e44bd70b09df18d6bddd7487417: Status 404 returned error can't find the container with id ee12a1507aabba2600ec1a443ec65abe78ef4e44bd70b09df18d6bddd7487417 Jan 30 12:22:00 crc kubenswrapper[4703]: I0130 12:22:00.777526 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-hczbb" event={"ID":"82de3756-5f83-4cc6-9afd-8b359a2561f7","Type":"ContainerStarted","Data":"1275928d477474b27de4ec33797b4d36a8a90b7ddff1d112cefdd18d3e6bafdc"} Jan 30 12:22:00 crc kubenswrapper[4703]: I0130 12:22:00.784350 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-2jd7h" event={"ID":"8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1","Type":"ContainerStarted","Data":"ae6fc2b59dcb75a1d1ad8f4f3204d2757b199c5805365ef34d47c7a11863945a"} Jan 30 12:22:00 crc kubenswrapper[4703]: I0130 12:22:00.794652 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-x8fjs" event={"ID":"2f2ddfec-e63d-4fbe-9c90-f9131bf80969","Type":"ContainerStarted","Data":"dfc39365d782b9bce0c56e001ca481936994738ce6f9cf20a1d5163b8db4fae0"} Jan 30 12:22:00 crc kubenswrapper[4703]: I0130 12:22:00.803895 4703 generic.go:334] "Generic (PLEG): container finished" podID="3d5e864b-4ad4-447d-8b20-6d9999cea7bb" containerID="5e2a67c6b92263c56ecd8e9554ff8f505f0e8f53d8344a3d84e6d9aa54ce988e" exitCode=143 Jan 30 12:22:00 crc kubenswrapper[4703]: I0130 12:22:00.803962 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-external-api-0" event={"ID":"3d5e864b-4ad4-447d-8b20-6d9999cea7bb","Type":"ContainerDied","Data":"5e2a67c6b92263c56ecd8e9554ff8f505f0e8f53d8344a3d84e6d9aa54ce988e"} Jan 30 12:22:00 crc kubenswrapper[4703]: W0130 12:22:00.816897 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7998bb0_fbe6_46ad_bf20_227ac24143c8.slice/crio-937c766fe080c1b88227b3025d96327ed507c79afce46569959497b13af6602b WatchSource:0}: Error finding container 937c766fe080c1b88227b3025d96327ed507c79afce46569959497b13af6602b: Status 404 returned error can't find the container with id 937c766fe080c1b88227b3025d96327ed507c79afce46569959497b13af6602b Jan 30 12:22:00 crc kubenswrapper[4703]: I0130 12:22:00.933629 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-7fec-account-create-update-zft2k"] Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.195306 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-7935-account-create-update-tpmdh" event={"ID":"bc0160f9-a98b-4839-b10d-a23168a56417","Type":"ContainerStarted","Data":"ee12a1507aabba2600ec1a443ec65abe78ef4e44bd70b09df18d6bddd7487417"} Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.210705 4703 generic.go:334] "Generic (PLEG): container finished" podID="82de3756-5f83-4cc6-9afd-8b359a2561f7" containerID="e31f986a4561d64e76a8d23b28a7783265cc56369ea559cc6810855ad4df2c58" exitCode=0 Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.210908 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-hczbb" event={"ID":"82de3756-5f83-4cc6-9afd-8b359a2561f7","Type":"ContainerDied","Data":"e31f986a4561d64e76a8d23b28a7783265cc56369ea559cc6810855ad4df2c58"} Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.217224 4703 generic.go:334] "Generic (PLEG): container finished" podID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerID="08c418b36146fb180e1ae5350db6f861f21cc15a2e03dc2d5e47a615ba82bf8f" exitCode=0 Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.217386 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb63b182-d322-4146-a62d-7a918fa5ad61","Type":"ContainerDied","Data":"08c418b36146fb180e1ae5350db6f861f21cc15a2e03dc2d5e47a615ba82bf8f"} Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.217432 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb63b182-d322-4146-a62d-7a918fa5ad61","Type":"ContainerDied","Data":"353c8b6e4c7b3529e6a0050c62b854a1e5927fd7cd165c86be6b17a85df06f1b"} Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.217448 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="353c8b6e4c7b3529e6a0050c62b854a1e5927fd7cd165c86be6b17a85df06f1b" Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.225663 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-7fec-account-create-update-zft2k" event={"ID":"2d299026-7d0b-43f0-8f66-269e434576a9","Type":"ContainerStarted","Data":"0b186511b156765ef1aef93a85b1ca644141407121d938847be943d77e9a2990"} Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.230955 4703 generic.go:334] "Generic (PLEG): container finished" podID="2f2ddfec-e63d-4fbe-9c90-f9131bf80969" containerID="9f2257b842d2944119879d7107a1b3214c625fcbd1cd347ee2de9edf964d0e03" exitCode=0 Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.231050 4703 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-x8fjs" event={"ID":"2f2ddfec-e63d-4fbe-9c90-f9131bf80969","Type":"ContainerDied","Data":"9f2257b842d2944119879d7107a1b3214c625fcbd1cd347ee2de9edf964d0e03"} Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.240508 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7e42-account-create-update-skmfk" event={"ID":"a7998bb0-fbe6-46ad-bf20-227ac24143c8","Type":"ContainerStarted","Data":"937c766fe080c1b88227b3025d96327ed507c79afce46569959497b13af6602b"} Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.246742 4703 generic.go:334] "Generic (PLEG): container finished" podID="8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1" containerID="88757d7a644a38eed98801b8b6c43b72f90255da67c001442dfa1af21e126496" exitCode=0 Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.246857 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-2jd7h" event={"ID":"8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1","Type":"ContainerDied","Data":"88757d7a644a38eed98801b8b6c43b72f90255da67c001442dfa1af21e126496"} Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.263097 4703 generic.go:334] "Generic (PLEG): container finished" podID="9c6d3262-7469-45ac-b5c8-9eb0f9456a5a" containerID="fda2400a9ade21756ca56d52218ebcddb5692194c691d158ef06b11e6b0694d2" exitCode=137 Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.263170 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77fb4cf9b8-pw692" event={"ID":"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a","Type":"ContainerDied","Data":"fda2400a9ade21756ca56d52218ebcddb5692194c691d158ef06b11e6b0694d2"} Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.263213 4703 scope.go:117] "RemoveContainer" containerID="a29c27b7fb233396218ace770f8222710c040473791f84a186e8849760d220e4" Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.292052 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.466000 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-combined-ca-bundle\") pod \"bb63b182-d322-4146-a62d-7a918fa5ad61\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.466200 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-config-data\") pod \"bb63b182-d322-4146-a62d-7a918fa5ad61\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.466257 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb63b182-d322-4146-a62d-7a918fa5ad61-log-httpd\") pod \"bb63b182-d322-4146-a62d-7a918fa5ad61\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.466283 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb63b182-d322-4146-a62d-7a918fa5ad61-run-httpd\") pod \"bb63b182-d322-4146-a62d-7a918fa5ad61\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.466383 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vs2km\" (UniqueName: \"kubernetes.io/projected/bb63b182-d322-4146-a62d-7a918fa5ad61-kube-api-access-vs2km\") pod \"bb63b182-d322-4146-a62d-7a918fa5ad61\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.466597 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-sg-core-conf-yaml\") pod \"bb63b182-d322-4146-a62d-7a918fa5ad61\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.466739 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-scripts\") pod \"bb63b182-d322-4146-a62d-7a918fa5ad61\" (UID: \"bb63b182-d322-4146-a62d-7a918fa5ad61\") " Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.473684 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb63b182-d322-4146-a62d-7a918fa5ad61-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "bb63b182-d322-4146-a62d-7a918fa5ad61" (UID: "bb63b182-d322-4146-a62d-7a918fa5ad61"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.476306 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb63b182-d322-4146-a62d-7a918fa5ad61-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "bb63b182-d322-4146-a62d-7a918fa5ad61" (UID: "bb63b182-d322-4146-a62d-7a918fa5ad61"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.478325 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.478686 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="2e06db1b-bded-42ad-bd1e-8f7d76faf399" containerName="glance-log" containerID="cri-o://0c096790719dd5337c355ae48bef2725394ace49dc8b6390b01c1f5ace1d1355" gracePeriod=30 Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.478764 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="2e06db1b-bded-42ad-bd1e-8f7d76faf399" containerName="glance-httpd" containerID="cri-o://554ef222568526b96f371ff24cf6656b26fd015e644d5c749f496cef9d68ab04" gracePeriod=30 Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.499493 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-scripts" (OuterVolumeSpecName: "scripts") pod "bb63b182-d322-4146-a62d-7a918fa5ad61" (UID: "bb63b182-d322-4146-a62d-7a918fa5ad61"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.523924 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb63b182-d322-4146-a62d-7a918fa5ad61-kube-api-access-vs2km" (OuterVolumeSpecName: "kube-api-access-vs2km") pod "bb63b182-d322-4146-a62d-7a918fa5ad61" (UID: "bb63b182-d322-4146-a62d-7a918fa5ad61"). InnerVolumeSpecName "kube-api-access-vs2km". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.570412 4703 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb63b182-d322-4146-a62d-7a918fa5ad61-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.570472 4703 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb63b182-d322-4146-a62d-7a918fa5ad61-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.570486 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vs2km\" (UniqueName: \"kubernetes.io/projected/bb63b182-d322-4146-a62d-7a918fa5ad61-kube-api-access-vs2km\") on node \"crc\" DevicePath \"\"" Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.570501 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.684446 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "bb63b182-d322-4146-a62d-7a918fa5ad61" (UID: "bb63b182-d322-4146-a62d-7a918fa5ad61"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.707162 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bb63b182-d322-4146-a62d-7a918fa5ad61" (UID: "bb63b182-d322-4146-a62d-7a918fa5ad61"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.781807 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-config-data" (OuterVolumeSpecName: "config-data") pod "bb63b182-d322-4146-a62d-7a918fa5ad61" (UID: "bb63b182-d322-4146-a62d-7a918fa5ad61"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.786292 4703 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.786353 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:22:02 crc kubenswrapper[4703]: I0130 12:22:02.786368 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb63b182-d322-4146-a62d-7a918fa5ad61-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.287375 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-77fb4cf9b8-pw692" event={"ID":"9c6d3262-7469-45ac-b5c8-9eb0f9456a5a","Type":"ContainerStarted","Data":"e5c570ec54ab57955d949c302325c1907db223f68478ef9a5e4f1beb6ca16ef4"} Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.293765 4703 generic.go:334] "Generic (PLEG): container finished" podID="2d299026-7d0b-43f0-8f66-269e434576a9" containerID="02935b86d9ba3a50b03c82fd1c55ca5b73c248933f84c13fdff687d92d515897" exitCode=0 Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.293868 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-7fec-account-create-update-zft2k" event={"ID":"2d299026-7d0b-43f0-8f66-269e434576a9","Type":"ContainerDied","Data":"02935b86d9ba3a50b03c82fd1c55ca5b73c248933f84c13fdff687d92d515897"} Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.299097 4703 generic.go:334] "Generic (PLEG): container finished" podID="3d5e864b-4ad4-447d-8b20-6d9999cea7bb" containerID="598a787f498683049fc201ad26000b02a7f839024e16b91d30c5f557e8ea1365" exitCode=0 Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.299249 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3d5e864b-4ad4-447d-8b20-6d9999cea7bb","Type":"ContainerDied","Data":"598a787f498683049fc201ad26000b02a7f839024e16b91d30c5f557e8ea1365"} Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.305813 4703 generic.go:334] "Generic (PLEG): container finished" podID="2e06db1b-bded-42ad-bd1e-8f7d76faf399" containerID="0c096790719dd5337c355ae48bef2725394ace49dc8b6390b01c1f5ace1d1355" exitCode=143 Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.305879 4703 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2e06db1b-bded-42ad-bd1e-8f7d76faf399","Type":"ContainerDied","Data":"0c096790719dd5337c355ae48bef2725394ace49dc8b6390b01c1f5ace1d1355"} Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.310698 4703 generic.go:334] "Generic (PLEG): container finished" podID="a7998bb0-fbe6-46ad-bf20-227ac24143c8" containerID="96e429b7f97933e42d40124d44a156d4ef71adcdbdb6d58507bf462568b32612" exitCode=0 Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.310767 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7e42-account-create-update-skmfk" event={"ID":"a7998bb0-fbe6-46ad-bf20-227ac24143c8","Type":"ContainerDied","Data":"96e429b7f97933e42d40124d44a156d4ef71adcdbdb6d58507bf462568b32612"} Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.317426 4703 generic.go:334] "Generic (PLEG): container finished" podID="bc0160f9-a98b-4839-b10d-a23168a56417" containerID="53fd485e55a85bd02e29df0b85806adee9e53ccee5ced2686a8b1a1fcda98ca4" exitCode=0 Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.317501 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-7935-account-create-update-tpmdh" event={"ID":"bc0160f9-a98b-4839-b10d-a23168a56417","Type":"ContainerDied","Data":"53fd485e55a85bd02e29df0b85806adee9e53ccee5ced2686a8b1a1fcda98ca4"} Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.334986 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5f9958979d-8h859" event={"ID":"b888ea51-970d-4f4d-9e5c-f456ca173472","Type":"ContainerStarted","Data":"12c6e01d1ec7cf675be0982e70d170f25953b74704982af7f66aaded402ddb28"} Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.335491 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.480227 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.513224 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.539400 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:22:03 crc kubenswrapper[4703]: E0130 12:22:03.540391 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerName="sg-core" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.540415 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerName="sg-core" Jan 30 12:22:03 crc kubenswrapper[4703]: E0130 12:22:03.540430 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerName="proxy-httpd" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.540439 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerName="proxy-httpd" Jan 30 12:22:03 crc kubenswrapper[4703]: E0130 12:22:03.540461 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerName="ceilometer-central-agent" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.540467 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerName="ceilometer-central-agent" Jan 30 12:22:03 crc kubenswrapper[4703]: E0130 12:22:03.540489 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerName="ceilometer-notification-agent" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.540495 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerName="ceilometer-notification-agent" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.540709 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerName="ceilometer-notification-agent" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.540729 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerName="sg-core" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.540756 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerName="ceilometer-central-agent" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.540769 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb63b182-d322-4146-a62d-7a918fa5ad61" containerName="proxy-httpd" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.543751 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.551002 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.552290 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.558056 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.765223 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.765402 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-scripts\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.765512 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.765543 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stk6f\" (UniqueName: \"kubernetes.io/projected/65249297-6776-467d-a69e-863e3a9702e4-kube-api-access-stk6f\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.765564 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65249297-6776-467d-a69e-863e3a9702e4-log-httpd\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.765829 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-config-data\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.765909 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65249297-6776-467d-a69e-863e3a9702e4-run-httpd\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.870060 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 
12:22:03.870115 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stk6f\" (UniqueName: \"kubernetes.io/projected/65249297-6776-467d-a69e-863e3a9702e4-kube-api-access-stk6f\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.870164 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65249297-6776-467d-a69e-863e3a9702e4-log-httpd\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.870207 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-config-data\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.870233 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65249297-6776-467d-a69e-863e3a9702e4-run-httpd\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.870316 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.870353 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-scripts\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.871294 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65249297-6776-467d-a69e-863e3a9702e4-run-httpd\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.872113 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65249297-6776-467d-a69e-863e3a9702e4-log-httpd\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.881894 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-scripts\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.890716 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.894381 4703 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.894534 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stk6f\" (UniqueName: \"kubernetes.io/projected/65249297-6776-467d-a69e-863e3a9702e4-kube-api-access-stk6f\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:03 crc kubenswrapper[4703]: I0130 12:22:03.894646 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-config-data\") pod \"ceilometer-0\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.035517 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-2jd7h" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.092528 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hhtf\" (UniqueName: \"kubernetes.io/projected/8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1-kube-api-access-7hhtf\") pod \"8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1\" (UID: \"8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1\") " Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.093017 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1-operator-scripts\") pod \"8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1\" (UID: \"8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1\") " Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.096692 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1" (UID: "8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.101918 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1-kube-api-access-7hhtf" (OuterVolumeSpecName: "kube-api-access-7hhtf") pod "8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1" (UID: "8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1"). InnerVolumeSpecName "kube-api-access-7hhtf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.181071 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.198553 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7hhtf\" (UniqueName: \"kubernetes.io/projected/8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1-kube-api-access-7hhtf\") on node \"crc\" DevicePath \"\"" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.198603 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.226592 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-x8fjs" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.257636 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.263662 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-hczbb" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.299548 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l65mq\" (UniqueName: \"kubernetes.io/projected/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-kube-api-access-l65mq\") pod \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.299628 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-public-tls-certs\") pod \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.299735 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p67nk\" (UniqueName: \"kubernetes.io/projected/82de3756-5f83-4cc6-9afd-8b359a2561f7-kube-api-access-p67nk\") pod \"82de3756-5f83-4cc6-9afd-8b359a2561f7\" (UID: \"82de3756-5f83-4cc6-9afd-8b359a2561f7\") " Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.299779 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-httpd-run\") pod \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.299823 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bms8r\" (UniqueName: \"kubernetes.io/projected/2f2ddfec-e63d-4fbe-9c90-f9131bf80969-kube-api-access-bms8r\") pod \"2f2ddfec-e63d-4fbe-9c90-f9131bf80969\" (UID: \"2f2ddfec-e63d-4fbe-9c90-f9131bf80969\") " Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.299842 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-scripts\") pod \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.299870 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod 
\"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.299914 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-combined-ca-bundle\") pod \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.299947 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-config-data\") pod \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.300001 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82de3756-5f83-4cc6-9afd-8b359a2561f7-operator-scripts\") pod \"82de3756-5f83-4cc6-9afd-8b359a2561f7\" (UID: \"82de3756-5f83-4cc6-9afd-8b359a2561f7\") " Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.300038 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-logs\") pod \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\" (UID: \"3d5e864b-4ad4-447d-8b20-6d9999cea7bb\") " Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.300643 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f2ddfec-e63d-4fbe-9c90-f9131bf80969-operator-scripts\") pod \"2f2ddfec-e63d-4fbe-9c90-f9131bf80969\" (UID: \"2f2ddfec-e63d-4fbe-9c90-f9131bf80969\") " Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.301480 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f2ddfec-e63d-4fbe-9c90-f9131bf80969-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2f2ddfec-e63d-4fbe-9c90-f9131bf80969" (UID: "2f2ddfec-e63d-4fbe-9c90-f9131bf80969"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.306590 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-logs" (OuterVolumeSpecName: "logs") pod "3d5e864b-4ad4-447d-8b20-6d9999cea7bb" (UID: "3d5e864b-4ad4-447d-8b20-6d9999cea7bb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.313592 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "3d5e864b-4ad4-447d-8b20-6d9999cea7bb" (UID: "3d5e864b-4ad4-447d-8b20-6d9999cea7bb"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.312715 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82de3756-5f83-4cc6-9afd-8b359a2561f7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "82de3756-5f83-4cc6-9afd-8b359a2561f7" (UID: "82de3756-5f83-4cc6-9afd-8b359a2561f7"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.320274 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f2ddfec-e63d-4fbe-9c90-f9131bf80969-kube-api-access-bms8r" (OuterVolumeSpecName: "kube-api-access-bms8r") pod "2f2ddfec-e63d-4fbe-9c90-f9131bf80969" (UID: "2f2ddfec-e63d-4fbe-9c90-f9131bf80969"). InnerVolumeSpecName "kube-api-access-bms8r". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.334799 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-scripts" (OuterVolumeSpecName: "scripts") pod "3d5e864b-4ad4-447d-8b20-6d9999cea7bb" (UID: "3d5e864b-4ad4-447d-8b20-6d9999cea7bb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.336029 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-kube-api-access-l65mq" (OuterVolumeSpecName: "kube-api-access-l65mq") pod "3d5e864b-4ad4-447d-8b20-6d9999cea7bb" (UID: "3d5e864b-4ad4-447d-8b20-6d9999cea7bb"). InnerVolumeSpecName "kube-api-access-l65mq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.339386 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "3d5e864b-4ad4-447d-8b20-6d9999cea7bb" (UID: "3d5e864b-4ad4-447d-8b20-6d9999cea7bb"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.356561 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82de3756-5f83-4cc6-9afd-8b359a2561f7-kube-api-access-p67nk" (OuterVolumeSpecName: "kube-api-access-p67nk") pod "82de3756-5f83-4cc6-9afd-8b359a2561f7" (UID: "82de3756-5f83-4cc6-9afd-8b359a2561f7"). InnerVolumeSpecName "kube-api-access-p67nk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.364415 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3d5e864b-4ad4-447d-8b20-6d9999cea7bb" (UID: "3d5e864b-4ad4-447d-8b20-6d9999cea7bb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.390265 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"3d5e864b-4ad4-447d-8b20-6d9999cea7bb","Type":"ContainerDied","Data":"49f761ff573444b9b2457ee44bf53aa66e2e8d2f5bf577ffbdb31b4e4a4ae16c"} Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.390346 4703 scope.go:117] "RemoveContainer" containerID="598a787f498683049fc201ad26000b02a7f839024e16b91d30c5f557e8ea1365" Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.391389 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.401023 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-hczbb" event={"ID":"82de3756-5f83-4cc6-9afd-8b359a2561f7","Type":"ContainerDied","Data":"1275928d477474b27de4ec33797b4d36a8a90b7ddff1d112cefdd18d3e6bafdc"}
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.401079 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1275928d477474b27de4ec33797b4d36a8a90b7ddff1d112cefdd18d3e6bafdc"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.401523 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-hczbb"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.407906 4703 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.407946 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bms8r\" (UniqueName: \"kubernetes.io/projected/2f2ddfec-e63d-4fbe-9c90-f9131bf80969-kube-api-access-bms8r\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.407958 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-scripts\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.407993 4703 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" "
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.408004 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.408014 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-logs\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.408099 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82de3756-5f83-4cc6-9afd-8b359a2561f7-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.408111 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2f2ddfec-e63d-4fbe-9c90-f9131bf80969-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.408188 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l65mq\" (UniqueName: \"kubernetes.io/projected/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-kube-api-access-l65mq\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.408199 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p67nk\" (UniqueName: \"kubernetes.io/projected/82de3756-5f83-4cc6-9afd-8b359a2561f7-kube-api-access-p67nk\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.424083 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-2jd7h" event={"ID":"8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1","Type":"ContainerDied","Data":"ae6fc2b59dcb75a1d1ad8f4f3204d2757b199c5805365ef34d47c7a11863945a"}
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.424249 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ae6fc2b59dcb75a1d1ad8f4f3204d2757b199c5805365ef34d47c7a11863945a"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.424376 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-2jd7h"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.433921 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-x8fjs"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.440627 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-x8fjs" event={"ID":"2f2ddfec-e63d-4fbe-9c90-f9131bf80969","Type":"ContainerDied","Data":"dfc39365d782b9bce0c56e001ca481936994738ce6f9cf20a1d5163b8db4fae0"}
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.440713 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dfc39365d782b9bce0c56e001ca481936994738ce6f9cf20a1d5163b8db4fae0"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.453212 4703 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.508714 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-config-data" (OuterVolumeSpecName: "config-data") pod "3d5e864b-4ad4-447d-8b20-6d9999cea7bb" (UID: "3d5e864b-4ad4-447d-8b20-6d9999cea7bb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.508979 4703 scope.go:117] "RemoveContainer" containerID="5e2a67c6b92263c56ecd8e9554ff8f505f0e8f53d8344a3d84e6d9aa54ce988e"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.510941 4703 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.510986 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-config-data\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.518468 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "3d5e864b-4ad4-447d-8b20-6d9999cea7bb" (UID: "3d5e864b-4ad4-447d-8b20-6d9999cea7bb"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.614359 4703 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d5e864b-4ad4-447d-8b20-6d9999cea7bb-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.753589 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.828219 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.883239 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.940216 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 30 12:22:04 crc kubenswrapper[4703]: E0130 12:22:04.941352 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d5e864b-4ad4-447d-8b20-6d9999cea7bb" containerName="glance-httpd"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.941372 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d5e864b-4ad4-447d-8b20-6d9999cea7bb" containerName="glance-httpd"
Jan 30 12:22:04 crc kubenswrapper[4703]: E0130 12:22:04.941384 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f2ddfec-e63d-4fbe-9c90-f9131bf80969" containerName="mariadb-database-create"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.941391 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f2ddfec-e63d-4fbe-9c90-f9131bf80969" containerName="mariadb-database-create"
Jan 30 12:22:04 crc kubenswrapper[4703]: E0130 12:22:04.941420 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1" containerName="mariadb-database-create"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.941426 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1" containerName="mariadb-database-create"
Jan 30 12:22:04 crc kubenswrapper[4703]: E0130 12:22:04.941441 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82de3756-5f83-4cc6-9afd-8b359a2561f7" containerName="mariadb-database-create"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.941449 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="82de3756-5f83-4cc6-9afd-8b359a2561f7" containerName="mariadb-database-create"
Jan 30 12:22:04 crc kubenswrapper[4703]: E0130 12:22:04.941470 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d5e864b-4ad4-447d-8b20-6d9999cea7bb" containerName="glance-log"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.941476 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d5e864b-4ad4-447d-8b20-6d9999cea7bb" containerName="glance-log"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.941661 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1" containerName="mariadb-database-create"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.941682 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f2ddfec-e63d-4fbe-9c90-f9131bf80969" containerName="mariadb-database-create"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.941693 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="82de3756-5f83-4cc6-9afd-8b359a2561f7" containerName="mariadb-database-create"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.941702 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d5e864b-4ad4-447d-8b20-6d9999cea7bb" containerName="glance-httpd"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.941712 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d5e864b-4ad4-447d-8b20-6d9999cea7bb" containerName="glance-log"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.942939 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.946618 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Jan 30 12:22:04 crc kubenswrapper[4703]: I0130 12:22:04.957210 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.201987 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.202508 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-config-data\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.203147 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8vwh\" (UniqueName: \"kubernetes.io/projected/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-kube-api-access-p8vwh\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.203304 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.203490 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-scripts\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.203599 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-logs\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.203704 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.203943 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.208904 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d5e864b-4ad4-447d-8b20-6d9999cea7bb" path="/var/lib/kubelet/pods/3d5e864b-4ad4-447d-8b20-6d9999cea7bb/volumes"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.210936 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb63b182-d322-4146-a62d-7a918fa5ad61" path="/var/lib/kubelet/pods/bb63b182-d322-4146-a62d-7a918fa5ad61/volumes"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.212260 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.212300 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.559568 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-scripts\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.559637 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-logs\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.559664 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.559768 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.559824 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.559859 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-config-data\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.560024 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8vwh\" (UniqueName: \"kubernetes.io/projected/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-kube-api-access-p8vwh\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.560060 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.578329 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.579394 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.579623 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.581237 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-logs\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.586107 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.625698 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65249297-6776-467d-a69e-863e3a9702e4","Type":"ContainerStarted","Data":"95fe24e2320da1bdc46ac79afc802b0a4c29c5b4bcebe9136daa62bac6acbea1"}
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.628762 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-7fec-account-create-update-zft2k" event={"ID":"2d299026-7d0b-43f0-8f66-269e434576a9","Type":"ContainerDied","Data":"0b186511b156765ef1aef93a85b1ca644141407121d938847be943d77e9a2990"}
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.628853 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0b186511b156765ef1aef93a85b1ca644141407121d938847be943d77e9a2990"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.641172 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.641623 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-scripts\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.646808 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-config-data\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.660952 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.672008 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8vwh\" (UniqueName: \"kubernetes.io/projected/0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9-kube-api-access-p8vwh\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.677392 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9\") " pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.817505 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-7fec-account-create-update-zft2k"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.875852 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d299026-7d0b-43f0-8f66-269e434576a9-operator-scripts\") pod \"2d299026-7d0b-43f0-8f66-269e434576a9\" (UID: \"2d299026-7d0b-43f0-8f66-269e434576a9\") "
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.876720 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrsnd\" (UniqueName: \"kubernetes.io/projected/2d299026-7d0b-43f0-8f66-269e434576a9-kube-api-access-lrsnd\") pod \"2d299026-7d0b-43f0-8f66-269e434576a9\" (UID: \"2d299026-7d0b-43f0-8f66-269e434576a9\") "
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.877728 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2d299026-7d0b-43f0-8f66-269e434576a9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2d299026-7d0b-43f0-8f66-269e434576a9" (UID: "2d299026-7d0b-43f0-8f66-269e434576a9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.910734 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d299026-7d0b-43f0-8f66-269e434576a9-kube-api-access-lrsnd" (OuterVolumeSpecName: "kube-api-access-lrsnd") pod "2d299026-7d0b-43f0-8f66-269e434576a9" (UID: "2d299026-7d0b-43f0-8f66-269e434576a9"). InnerVolumeSpecName "kube-api-access-lrsnd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.966281 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.979277 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2d299026-7d0b-43f0-8f66-269e434576a9-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:05 crc kubenswrapper[4703]: I0130 12:22:05.979318 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrsnd\" (UniqueName: \"kubernetes.io/projected/2d299026-7d0b-43f0-8f66-269e434576a9-kube-api-access-lrsnd\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.179231 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-7935-account-create-update-tpmdh"
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.184204 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7e42-account-create-update-skmfk"
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.350690 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfzpg\" (UniqueName: \"kubernetes.io/projected/a7998bb0-fbe6-46ad-bf20-227ac24143c8-kube-api-access-zfzpg\") pod \"a7998bb0-fbe6-46ad-bf20-227ac24143c8\" (UID: \"a7998bb0-fbe6-46ad-bf20-227ac24143c8\") "
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.350811 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7998bb0-fbe6-46ad-bf20-227ac24143c8-operator-scripts\") pod \"a7998bb0-fbe6-46ad-bf20-227ac24143c8\" (UID: \"a7998bb0-fbe6-46ad-bf20-227ac24143c8\") "
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.350840 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc0160f9-a98b-4839-b10d-a23168a56417-operator-scripts\") pod \"bc0160f9-a98b-4839-b10d-a23168a56417\" (UID: \"bc0160f9-a98b-4839-b10d-a23168a56417\") "
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.350865 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dzmb\" (UniqueName: \"kubernetes.io/projected/bc0160f9-a98b-4839-b10d-a23168a56417-kube-api-access-9dzmb\") pod \"bc0160f9-a98b-4839-b10d-a23168a56417\" (UID: \"bc0160f9-a98b-4839-b10d-a23168a56417\") "
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.353672 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7998bb0-fbe6-46ad-bf20-227ac24143c8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a7998bb0-fbe6-46ad-bf20-227ac24143c8" (UID: "a7998bb0-fbe6-46ad-bf20-227ac24143c8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.362693 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc0160f9-a98b-4839-b10d-a23168a56417-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bc0160f9-a98b-4839-b10d-a23168a56417" (UID: "bc0160f9-a98b-4839-b10d-a23168a56417"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.363425 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a7998bb0-fbe6-46ad-bf20-227ac24143c8-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.363450 4703 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bc0160f9-a98b-4839-b10d-a23168a56417-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.363650 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7998bb0-fbe6-46ad-bf20-227ac24143c8-kube-api-access-zfzpg" (OuterVolumeSpecName: "kube-api-access-zfzpg") pod "a7998bb0-fbe6-46ad-bf20-227ac24143c8" (UID: "a7998bb0-fbe6-46ad-bf20-227ac24143c8"). InnerVolumeSpecName "kube-api-access-zfzpg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.363731 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc0160f9-a98b-4839-b10d-a23168a56417-kube-api-access-9dzmb" (OuterVolumeSpecName: "kube-api-access-9dzmb") pod "bc0160f9-a98b-4839-b10d-a23168a56417" (UID: "bc0160f9-a98b-4839-b10d-a23168a56417"). InnerVolumeSpecName "kube-api-access-9dzmb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.410580 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.482107 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfzpg\" (UniqueName: \"kubernetes.io/projected/a7998bb0-fbe6-46ad-bf20-227ac24143c8-kube-api-access-zfzpg\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.486111 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dzmb\" (UniqueName: \"kubernetes.io/projected/bc0160f9-a98b-4839-b10d-a23168a56417-kube-api-access-9dzmb\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.587393 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e06db1b-bded-42ad-bd1e-8f7d76faf399-logs\") pod \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") "
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.587446 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-combined-ca-bundle\") pod \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") "
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.587514 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-internal-tls-certs\") pod \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") "
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.587664 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2e06db1b-bded-42ad-bd1e-8f7d76faf399-httpd-run\") pod \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") "
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.587825 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-config-data\") pod \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") "
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.587993 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-scripts\") pod \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") "
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.588065 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") "
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.588160 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4jq5\" (UniqueName: \"kubernetes.io/projected/2e06db1b-bded-42ad-bd1e-8f7d76faf399-kube-api-access-q4jq5\") pod \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\" (UID: \"2e06db1b-bded-42ad-bd1e-8f7d76faf399\") "
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.591113 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e06db1b-bded-42ad-bd1e-8f7d76faf399-logs" (OuterVolumeSpecName: "logs") pod "2e06db1b-bded-42ad-bd1e-8f7d76faf399" (UID: "2e06db1b-bded-42ad-bd1e-8f7d76faf399"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.591525 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e06db1b-bded-42ad-bd1e-8f7d76faf399-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "2e06db1b-bded-42ad-bd1e-8f7d76faf399" (UID: "2e06db1b-bded-42ad-bd1e-8f7d76faf399"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.607750 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e06db1b-bded-42ad-bd1e-8f7d76faf399-kube-api-access-q4jq5" (OuterVolumeSpecName: "kube-api-access-q4jq5") pod "2e06db1b-bded-42ad-bd1e-8f7d76faf399" (UID: "2e06db1b-bded-42ad-bd1e-8f7d76faf399"). InnerVolumeSpecName "kube-api-access-q4jq5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.615549 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "2e06db1b-bded-42ad-bd1e-8f7d76faf399" (UID: "2e06db1b-bded-42ad-bd1e-8f7d76faf399"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.615717 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-scripts" (OuterVolumeSpecName: "scripts") pod "2e06db1b-bded-42ad-bd1e-8f7d76faf399" (UID: "2e06db1b-bded-42ad-bd1e-8f7d76faf399"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.663882 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2e06db1b-bded-42ad-bd1e-8f7d76faf399" (UID: "2e06db1b-bded-42ad-bd1e-8f7d76faf399"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.670791 4703 generic.go:334] "Generic (PLEG): container finished" podID="2e06db1b-bded-42ad-bd1e-8f7d76faf399" containerID="554ef222568526b96f371ff24cf6656b26fd015e644d5c749f496cef9d68ab04" exitCode=0
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.671361 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2e06db1b-bded-42ad-bd1e-8f7d76faf399","Type":"ContainerDied","Data":"554ef222568526b96f371ff24cf6656b26fd015e644d5c749f496cef9d68ab04"}
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.671542 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2e06db1b-bded-42ad-bd1e-8f7d76faf399","Type":"ContainerDied","Data":"48aa4aedf0473662770adcc3cba881b2628ca96e26e186984c656a8fe6a6637d"}
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.670910 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.671699 4703 scope.go:117] "RemoveContainer" containerID="554ef222568526b96f371ff24cf6656b26fd015e644d5c749f496cef9d68ab04"
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.686776 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "2e06db1b-bded-42ad-bd1e-8f7d76faf399" (UID: "2e06db1b-bded-42ad-bd1e-8f7d76faf399"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.686902 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-7e42-account-create-update-skmfk"
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.686890 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-7e42-account-create-update-skmfk" event={"ID":"a7998bb0-fbe6-46ad-bf20-227ac24143c8","Type":"ContainerDied","Data":"937c766fe080c1b88227b3025d96327ed507c79afce46569959497b13af6602b"}
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.687431 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="937c766fe080c1b88227b3025d96327ed507c79afce46569959497b13af6602b"
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.695220 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2e06db1b-bded-42ad-bd1e-8f7d76faf399-logs\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.695252 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.695263 4703 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.695272 4703 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2e06db1b-bded-42ad-bd1e-8f7d76faf399-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.695283 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-scripts\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.695311 4703 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" "
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.695322 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4jq5\" (UniqueName: \"kubernetes.io/projected/2e06db1b-bded-42ad-bd1e-8f7d76faf399-kube-api-access-q4jq5\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.700241 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65249297-6776-467d-a69e-863e3a9702e4","Type":"ContainerStarted","Data":"53abc2e3771959806dde05dfde05d58a401a7c5f4372f904e42e7e40cbd91918"}
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.709505 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-7fec-account-create-update-zft2k"
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.712474 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-7935-account-create-update-tpmdh"
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.713292 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-7935-account-create-update-tpmdh" event={"ID":"bc0160f9-a98b-4839-b10d-a23168a56417","Type":"ContainerDied","Data":"ee12a1507aabba2600ec1a443ec65abe78ef4e44bd70b09df18d6bddd7487417"}
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.713360 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ee12a1507aabba2600ec1a443ec65abe78ef4e44bd70b09df18d6bddd7487417"
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.723402 4703 scope.go:117] "RemoveContainer" containerID="0c096790719dd5337c355ae48bef2725394ace49dc8b6390b01c1f5ace1d1355"
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.755308 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.765044 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-config-data" (OuterVolumeSpecName: "config-data") pod "2e06db1b-bded-42ad-bd1e-8f7d76faf399" (UID: "2e06db1b-bded-42ad-bd1e-8f7d76faf399"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.783912 4703 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc"
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.799148 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2e06db1b-bded-42ad-bd1e-8f7d76faf399-config-data\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.799192 4703 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\""
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.809453 4703 scope.go:117] "RemoveContainer" containerID="554ef222568526b96f371ff24cf6656b26fd015e644d5c749f496cef9d68ab04"
Jan 30 12:22:06 crc kubenswrapper[4703]: E0130 12:22:06.812075 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"554ef222568526b96f371ff24cf6656b26fd015e644d5c749f496cef9d68ab04\": container with ID starting with 554ef222568526b96f371ff24cf6656b26fd015e644d5c749f496cef9d68ab04 not found: ID does not exist" containerID="554ef222568526b96f371ff24cf6656b26fd015e644d5c749f496cef9d68ab04"
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.812335 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"554ef222568526b96f371ff24cf6656b26fd015e644d5c749f496cef9d68ab04"} err="failed to get container status \"554ef222568526b96f371ff24cf6656b26fd015e644d5c749f496cef9d68ab04\": rpc error: code = NotFound desc = could not find container \"554ef222568526b96f371ff24cf6656b26fd015e644d5c749f496cef9d68ab04\": container with ID starting with 554ef222568526b96f371ff24cf6656b26fd015e644d5c749f496cef9d68ab04 not found: ID does not exist"
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.812404 4703 scope.go:117] "RemoveContainer" containerID="0c096790719dd5337c355ae48bef2725394ace49dc8b6390b01c1f5ace1d1355"
Jan 30 12:22:06 crc kubenswrapper[4703]: E0130 12:22:06.813464 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c096790719dd5337c355ae48bef2725394ace49dc8b6390b01c1f5ace1d1355\": container with ID starting with 0c096790719dd5337c355ae48bef2725394ace49dc8b6390b01c1f5ace1d1355 not found: ID does not exist" containerID="0c096790719dd5337c355ae48bef2725394ace49dc8b6390b01c1f5ace1d1355"
Jan 30 12:22:06 crc kubenswrapper[4703]: I0130 12:22:06.813563 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c096790719dd5337c355ae48bef2725394ace49dc8b6390b01c1f5ace1d1355"} err="failed to get container status \"0c096790719dd5337c355ae48bef2725394ace49dc8b6390b01c1f5ace1d1355\": rpc error: code = NotFound desc = could not find container \"0c096790719dd5337c355ae48bef2725394ace49dc8b6390b01c1f5ace1d1355\": container with ID starting with 0c096790719dd5337c355ae48bef2725394ace49dc8b6390b01c1f5ace1d1355 not found: ID does not exist"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.454521 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.455183 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.455211 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 30 12:22:07 crc kubenswrapper[4703]: E0130 12:22:07.455676 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc0160f9-a98b-4839-b10d-a23168a56417" containerName="mariadb-account-create-update"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.455699 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc0160f9-a98b-4839-b10d-a23168a56417" containerName="mariadb-account-create-update"
Jan 30 12:22:07 crc kubenswrapper[4703]: E0130 12:22:07.455728 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d299026-7d0b-43f0-8f66-269e434576a9" containerName="mariadb-account-create-update"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.455739 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d299026-7d0b-43f0-8f66-269e434576a9" containerName="mariadb-account-create-update"
Jan 30 12:22:07 crc kubenswrapper[4703]: E0130 12:22:07.455768 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e06db1b-bded-42ad-bd1e-8f7d76faf399" containerName="glance-httpd"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.455776 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e06db1b-bded-42ad-bd1e-8f7d76faf399" containerName="glance-httpd"
Jan 30 12:22:07 crc kubenswrapper[4703]: E0130 12:22:07.455791 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e06db1b-bded-42ad-bd1e-8f7d76faf399" containerName="glance-log"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.455799 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e06db1b-bded-42ad-bd1e-8f7d76faf399" containerName="glance-log"
Jan 30 12:22:07 crc kubenswrapper[4703]: E0130 12:22:07.455823 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7998bb0-fbe6-46ad-bf20-227ac24143c8" containerName="mariadb-account-create-update"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.455831 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7998bb0-fbe6-46ad-bf20-227ac24143c8" containerName="mariadb-account-create-update"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.456090 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d299026-7d0b-43f0-8f66-269e434576a9" containerName="mariadb-account-create-update"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.456237 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e06db1b-bded-42ad-bd1e-8f7d76faf399" containerName="glance-log"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.456271 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7998bb0-fbe6-46ad-bf20-227ac24143c8" containerName="mariadb-account-create-update"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.456284 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc0160f9-a98b-4839-b10d-a23168a56417" containerName="mariadb-account-create-update"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.456298 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e06db1b-bded-42ad-bd1e-8f7d76faf399" containerName="glance-httpd"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.461872 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.479192 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.479635 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.483589 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.635082 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.635200 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a12e4088-00b9-416c-92a8-a40b997f06ea-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.635243 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a12e4088-00b9-416c-92a8-a40b997f06ea-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.636238 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a12e4088-00b9-416c-92a8-a40b997f06ea-logs\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.636308 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5f9g\" (UniqueName: \"kubernetes.io/projected/a12e4088-00b9-416c-92a8-a40b997f06ea-kube-api-access-z5f9g\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.636434 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a12e4088-00b9-416c-92a8-a40b997f06ea-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.636457 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a12e4088-00b9-416c-92a8-a40b997f06ea-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.636564 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a12e4088-00b9-416c-92a8-a40b997f06ea-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.731643 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9","Type":"ContainerStarted","Data":"1777beb34924cbd1096e5d8998d65a4cd7d2415e09129a1c937ad0af4cdfe342"}
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.741069 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.741150 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a12e4088-00b9-416c-92a8-a40b997f06ea-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.741177 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a12e4088-00b9-416c-92a8-a40b997f06ea-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.741237 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a12e4088-00b9-416c-92a8-a40b997f06ea-logs\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.741267 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5f9g\" (UniqueName: \"kubernetes.io/projected/a12e4088-00b9-416c-92a8-a40b997f06ea-kube-api-access-z5f9g\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.741338 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a12e4088-00b9-416c-92a8-a40b997f06ea-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.741358 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a12e4088-00b9-416c-92a8-a40b997f06ea-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.741424 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a12e4088-00b9-416c-92a8-a40b997f06ea-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.741929 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a12e4088-00b9-416c-92a8-a40b997f06ea-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.742199 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a12e4088-00b9-416c-92a8-a40b997f06ea-logs\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.742983 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.750757 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65249297-6776-467d-a69e-863e3a9702e4","Type":"ContainerStarted","Data":"8535c220de9bfab371dcefdd5a7443678968d6e1ee5ce77af1678101a3bc6f5c"}
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.759454 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a12e4088-00b9-416c-92a8-a40b997f06ea-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.762391 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a12e4088-00b9-416c-92a8-a40b997f06ea-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.763516 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a12e4088-00b9-416c-92a8-a40b997f06ea-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.765743 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a12e4088-00b9-416c-92a8-a40b997f06ea-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.781412 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5f9g\" (UniqueName: \"kubernetes.io/projected/a12e4088-00b9-416c-92a8-a40b997f06ea-kube-api-access-z5f9g\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.792165 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"a12e4088-00b9-416c-92a8-a40b997f06ea\") " pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:07 crc kubenswrapper[4703]: I0130 12:22:07.840379 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 30 12:22:08 crc kubenswrapper[4703]: I0130 12:22:08.658953 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 30 12:22:08 crc kubenswrapper[4703]: W0130 12:22:08.667302 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda12e4088_00b9_416c_92a8_a40b997f06ea.slice/crio-63f20881ff0cfbda9a4c6a389ec19abc59e29ad745c7d7ad9e8c45be9e515ba0 WatchSource:0}: Error finding container 63f20881ff0cfbda9a4c6a389ec19abc59e29ad745c7d7ad9e8c45be9e515ba0: Status 404 returned error can't find the container with id 63f20881ff0cfbda9a4c6a389ec19abc59e29ad745c7d7ad9e8c45be9e515ba0
Jan 30 12:22:08 crc kubenswrapper[4703]: I0130 12:22:08.765714 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a12e4088-00b9-416c-92a8-a40b997f06ea","Type":"ContainerStarted","Data":"63f20881ff0cfbda9a4c6a389ec19abc59e29ad745c7d7ad9e8c45be9e515ba0"}
Jan 30 12:22:08 crc kubenswrapper[4703]: I0130 12:22:08.770028 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65249297-6776-467d-a69e-863e3a9702e4","Type":"ContainerStarted","Data":"88d3059a11cfb7baa4ed49fa8bf0653c640cae343f25dd88df4bd6983d6352e3"}
Jan 30 12:22:08 crc kubenswrapper[4703]: I0130 12:22:08.778648 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9","Type":"ContainerStarted","Data":"85885c59f03fb28f409d4de33cf81790681818a29c52fe6609bc066de771e8c5"}
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.218206 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e06db1b-bded-42ad-bd1e-8f7d76faf399" path="/var/lib/kubelet/pods/2e06db1b-bded-42ad-bd1e-8f7d76faf399/volumes"
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.614246 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-2cgdm"]
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.617027 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-2cgdm"
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.620897 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-ngdwl"
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.621332 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts"
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.621605 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.638343 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-2cgdm"]
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.641202 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7284929e-736f-4b9e-bce6-b2128abd47fc-config-data\") pod \"nova-cell0-conductor-db-sync-2cgdm\" (UID: \"7284929e-736f-4b9e-bce6-b2128abd47fc\") " pod="openstack/nova-cell0-conductor-db-sync-2cgdm"
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.641704 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7284929e-736f-4b9e-bce6-b2128abd47fc-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-2cgdm\" (UID: \"7284929e-736f-4b9e-bce6-b2128abd47fc\") " pod="openstack/nova-cell0-conductor-db-sync-2cgdm"
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.641742 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lzwm\" (UniqueName: \"kubernetes.io/projected/7284929e-736f-4b9e-bce6-b2128abd47fc-kube-api-access-7lzwm\") pod \"nova-cell0-conductor-db-sync-2cgdm\" (UID: \"7284929e-736f-4b9e-bce6-b2128abd47fc\") " pod="openstack/nova-cell0-conductor-db-sync-2cgdm"
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.641812 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7284929e-736f-4b9e-bce6-b2128abd47fc-scripts\") pod \"nova-cell0-conductor-db-sync-2cgdm\" (UID: \"7284929e-736f-4b9e-bce6-b2128abd47fc\") " pod="openstack/nova-cell0-conductor-db-sync-2cgdm"
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.745997 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7284929e-736f-4b9e-bce6-b2128abd47fc-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-2cgdm\" (UID: \"7284929e-736f-4b9e-bce6-b2128abd47fc\") " pod="openstack/nova-cell0-conductor-db-sync-2cgdm"
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.746540 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lzwm\" (UniqueName: \"kubernetes.io/projected/7284929e-736f-4b9e-bce6-b2128abd47fc-kube-api-access-7lzwm\") pod \"nova-cell0-conductor-db-sync-2cgdm\" (UID: \"7284929e-736f-4b9e-bce6-b2128abd47fc\") " pod="openstack/nova-cell0-conductor-db-sync-2cgdm"
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.746600 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7284929e-736f-4b9e-bce6-b2128abd47fc-scripts\") pod \"nova-cell0-conductor-db-sync-2cgdm\" (UID: \"7284929e-736f-4b9e-bce6-b2128abd47fc\") " pod="openstack/nova-cell0-conductor-db-sync-2cgdm"
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.746727 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7284929e-736f-4b9e-bce6-b2128abd47fc-config-data\") pod \"nova-cell0-conductor-db-sync-2cgdm\" (UID: \"7284929e-736f-4b9e-bce6-b2128abd47fc\") " pod="openstack/nova-cell0-conductor-db-sync-2cgdm"
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.754201 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7284929e-736f-4b9e-bce6-b2128abd47fc-config-data\") pod \"nova-cell0-conductor-db-sync-2cgdm\" (UID: \"7284929e-736f-4b9e-bce6-b2128abd47fc\") " pod="openstack/nova-cell0-conductor-db-sync-2cgdm"
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.754651 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7284929e-736f-4b9e-bce6-b2128abd47fc-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-2cgdm\" (UID: \"7284929e-736f-4b9e-bce6-b2128abd47fc\") " pod="openstack/nova-cell0-conductor-db-sync-2cgdm"
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.766543 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7284929e-736f-4b9e-bce6-b2128abd47fc-scripts\") pod \"nova-cell0-conductor-db-sync-2cgdm\" (UID: \"7284929e-736f-4b9e-bce6-b2128abd47fc\") " pod="openstack/nova-cell0-conductor-db-sync-2cgdm"
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.771859 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lzwm\" (UniqueName: \"kubernetes.io/projected/7284929e-736f-4b9e-bce6-b2128abd47fc-kube-api-access-7lzwm\") pod \"nova-cell0-conductor-db-sync-2cgdm\" (UID: \"7284929e-736f-4b9e-bce6-b2128abd47fc\") " pod="openstack/nova-cell0-conductor-db-sync-2cgdm"
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.817766 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9","Type":"ContainerStarted","Data":"409eb013c36b968e0803ac6deff2bac3470420917b61eb852b8f2fa242c5fdb1"}
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.830817 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a12e4088-00b9-416c-92a8-a40b997f06ea","Type":"ContainerStarted","Data":"3468489f9a11f146e0447787efc534eb6bd777472e9dd620b3ab3b6d5df90590"}
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.873052 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.873015741 podStartE2EDuration="5.873015741s" podCreationTimestamp="2026-01-30 12:22:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:22:09.843665999 +0000 UTC m=+1565.621487663" watchObservedRunningTime="2026-01-30 12:22:09.873015741 +0000 UTC m=+1565.650837395"
Jan 30 12:22:09 crc kubenswrapper[4703]: I0130 12:22:09.971719 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-2cgdm"
Jan 30 12:22:10 crc kubenswrapper[4703]: I0130 12:22:10.452196 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-2cgdm"]
Jan 30 12:22:10 crc kubenswrapper[4703]: I0130 12:22:10.500770 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5f9958979d-8h859"
Jan 30 12:22:10 crc kubenswrapper[4703]: I0130 12:22:10.500908 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5f9958979d-8h859"
Jan 30 12:22:11 crc kubenswrapper[4703]: I0130 12:22:11.003167 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-77fb4cf9b8-pw692"
Jan 30 12:22:11 crc kubenswrapper[4703]: I0130 12:22:11.005229 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-77fb4cf9b8-pw692"
Jan 30 12:22:11 crc kubenswrapper[4703]: I0130 12:22:11.029873 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a12e4088-00b9-416c-92a8-a40b997f06ea","Type":"ContainerStarted","Data":"993bdfc89392e1cfee4154a316cb4c5aa8da3569dcff6f6b2b0c26563d4eebc8"}
Jan 30 12:22:11 crc kubenswrapper[4703]: I0130 12:22:11.033870 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-2cgdm" event={"ID":"7284929e-736f-4b9e-bce6-b2128abd47fc","Type":"ContainerStarted","Data":"6ffe5bc7311e3a14f6ce85cbf82272c24c763a1ed0e756fe7291b0c63669ff0b"}
Jan 30 12:22:11 crc kubenswrapper[4703]: I0130 12:22:11.080938 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.080909898 podStartE2EDuration="4.080909898s" podCreationTimestamp="2026-01-30 12:22:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:22:11.075351764 +0000 UTC m=+1566.853173438" watchObservedRunningTime="2026-01-30 12:22:11.080909898 +0000 UTC m=+1566.858731552"
Jan 30 12:22:12 crc kubenswrapper[4703]: I0130 12:22:12.053319 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65249297-6776-467d-a69e-863e3a9702e4","Type":"ContainerStarted","Data":"07f55484c80ec6722c54e982b05d5092192569f8054db9191c386c9b00493b03"}
Jan 30 12:22:12 crc kubenswrapper[4703]: I0130 12:22:12.053980 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="65249297-6776-467d-a69e-863e3a9702e4" containerName="ceilometer-central-agent" containerID="cri-o://53abc2e3771959806dde05dfde05d58a401a7c5f4372f904e42e7e40cbd91918" gracePeriod=30
Jan 30 12:22:12 crc kubenswrapper[4703]: I0130 12:22:12.054706 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="65249297-6776-467d-a69e-863e3a9702e4" containerName="proxy-httpd" containerID="cri-o://07f55484c80ec6722c54e982b05d5092192569f8054db9191c386c9b00493b03" gracePeriod=30
Jan 30 12:22:12 crc kubenswrapper[4703]: I0130 12:22:12.054767 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="65249297-6776-467d-a69e-863e3a9702e4" containerName="sg-core" containerID="cri-o://88d3059a11cfb7baa4ed49fa8bf0653c640cae343f25dd88df4bd6983d6352e3" gracePeriod=30
Jan 30 12:22:12 crc kubenswrapper[4703]: I0130
12:22:12.054806 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="65249297-6776-467d-a69e-863e3a9702e4" containerName="ceilometer-notification-agent" containerID="cri-o://8535c220de9bfab371dcefdd5a7443678968d6e1ee5ce77af1678101a3bc6f5c" gracePeriod=30 Jan 30 12:22:13 crc kubenswrapper[4703]: I0130 12:22:13.072185 4703 generic.go:334] "Generic (PLEG): container finished" podID="65249297-6776-467d-a69e-863e3a9702e4" containerID="07f55484c80ec6722c54e982b05d5092192569f8054db9191c386c9b00493b03" exitCode=0 Jan 30 12:22:13 crc kubenswrapper[4703]: I0130 12:22:13.073554 4703 generic.go:334] "Generic (PLEG): container finished" podID="65249297-6776-467d-a69e-863e3a9702e4" containerID="88d3059a11cfb7baa4ed49fa8bf0653c640cae343f25dd88df4bd6983d6352e3" exitCode=2 Jan 30 12:22:13 crc kubenswrapper[4703]: I0130 12:22:13.073632 4703 generic.go:334] "Generic (PLEG): container finished" podID="65249297-6776-467d-a69e-863e3a9702e4" containerID="8535c220de9bfab371dcefdd5a7443678968d6e1ee5ce77af1678101a3bc6f5c" exitCode=0 Jan 30 12:22:13 crc kubenswrapper[4703]: I0130 12:22:13.072240 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65249297-6776-467d-a69e-863e3a9702e4","Type":"ContainerDied","Data":"07f55484c80ec6722c54e982b05d5092192569f8054db9191c386c9b00493b03"} Jan 30 12:22:13 crc kubenswrapper[4703]: I0130 12:22:13.073807 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65249297-6776-467d-a69e-863e3a9702e4","Type":"ContainerDied","Data":"88d3059a11cfb7baa4ed49fa8bf0653c640cae343f25dd88df4bd6983d6352e3"} Jan 30 12:22:13 crc kubenswrapper[4703]: I0130 12:22:13.074186 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65249297-6776-467d-a69e-863e3a9702e4","Type":"ContainerDied","Data":"8535c220de9bfab371dcefdd5a7443678968d6e1ee5ce77af1678101a3bc6f5c"} Jan 30 12:22:15 crc kubenswrapper[4703]: I0130 12:22:15.121435 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=6.140766 podStartE2EDuration="12.121407237s" podCreationTimestamp="2026-01-30 12:22:03 +0000 UTC" firstStartedPulling="2026-01-30 12:22:05.156094502 +0000 UTC m=+1560.933916156" lastFinishedPulling="2026-01-30 12:22:11.136735739 +0000 UTC m=+1566.914557393" observedRunningTime="2026-01-30 12:22:12.085272258 +0000 UTC m=+1567.863093912" watchObservedRunningTime="2026-01-30 12:22:15.121407237 +0000 UTC m=+1570.899228891" Jan 30 12:22:15 crc kubenswrapper[4703]: I0130 12:22:15.966530 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 30 12:22:15 crc kubenswrapper[4703]: I0130 12:22:15.967157 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 30 12:22:16 crc kubenswrapper[4703]: I0130 12:22:16.023870 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 30 12:22:16 crc kubenswrapper[4703]: I0130 12:22:16.036924 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 30 12:22:16 crc kubenswrapper[4703]: I0130 12:22:16.131345 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 30 12:22:16 crc kubenswrapper[4703]: I0130 12:22:16.131420 
4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 30 12:22:18 crc kubenswrapper[4703]: I0130 12:22:18.123964 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 30 12:22:18 crc kubenswrapper[4703]: I0130 12:22:18.124579 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 30 12:22:18 crc kubenswrapper[4703]: I0130 12:22:18.192630 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 30 12:22:18 crc kubenswrapper[4703]: I0130 12:22:18.194241 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 30 12:22:18 crc kubenswrapper[4703]: I0130 12:22:18.216968 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 30 12:22:19 crc kubenswrapper[4703]: I0130 12:22:19.072304 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 30 12:22:19 crc kubenswrapper[4703]: I0130 12:22:19.077582 4703 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 30 12:22:19 crc kubenswrapper[4703]: I0130 12:22:19.194862 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 30 12:22:19 crc kubenswrapper[4703]: I0130 12:22:19.383969 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 30 12:22:20 crc kubenswrapper[4703]: I0130 12:22:20.208722 4703 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 30 12:22:20 crc kubenswrapper[4703]: I0130 12:22:20.504328 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5f9958979d-8h859" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.159:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.159:8443: connect: connection refused" Jan 30 12:22:20 crc kubenswrapper[4703]: I0130 12:22:20.882191 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-77fb4cf9b8-pw692" podUID="9c6d3262-7469-45ac-b5c8-9eb0f9456a5a" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.160:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.160:8443: connect: connection refused" Jan 30 12:22:22 crc kubenswrapper[4703]: I0130 12:22:22.040678 4703 generic.go:334] "Generic (PLEG): container finished" podID="65249297-6776-467d-a69e-863e3a9702e4" containerID="53abc2e3771959806dde05dfde05d58a401a7c5f4372f904e42e7e40cbd91918" exitCode=0 Jan 30 12:22:22 crc kubenswrapper[4703]: I0130 12:22:22.040784 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65249297-6776-467d-a69e-863e3a9702e4","Type":"ContainerDied","Data":"53abc2e3771959806dde05dfde05d58a401a7c5f4372f904e42e7e40cbd91918"} Jan 30 12:22:25 crc kubenswrapper[4703]: I0130 12:22:25.751492 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 30 12:22:25 crc kubenswrapper[4703]: I0130 12:22:25.752040 4703 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 30 12:22:26 crc kubenswrapper[4703]: 
I0130 12:22:26.179499 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 30 12:22:26 crc kubenswrapper[4703]: E0130 12:22:26.743299 4703 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified" Jan 30 12:22:26 crc kubenswrapper[4703]: E0130 12:22:26.744027 4703 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:nova-cell0-conductor-db-sync,Image:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CELL_NAME,Value:cell0,ValueFrom:nil,},EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:false,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/kolla/config_files/config.json,SubPath:nova-conductor-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7lzwm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42436,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-cell0-conductor-db-sync-2cgdm_openstack(7284929e-736f-4b9e-bce6-b2128abd47fc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 30 12:22:26 crc kubenswrapper[4703]: E0130 12:22:26.745845 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-cell0-conductor-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/nova-cell0-conductor-db-sync-2cgdm" podUID="7284929e-736f-4b9e-bce6-b2128abd47fc" Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.158827 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65249297-6776-467d-a69e-863e3a9702e4","Type":"ContainerDied","Data":"95fe24e2320da1bdc46ac79afc802b0a4c29c5b4bcebe9136daa62bac6acbea1"} Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.158986 4703 pod_container_deletor.go:80] "Container 
not found in pod's containers" containerID="95fe24e2320da1bdc46ac79afc802b0a4c29c5b4bcebe9136daa62bac6acbea1" Jan 30 12:22:27 crc kubenswrapper[4703]: E0130 12:22:27.169872 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-cell0-conductor-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified\\\"\"" pod="openstack/nova-cell0-conductor-db-sync-2cgdm" podUID="7284929e-736f-4b9e-bce6-b2128abd47fc" Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.249243 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.316515 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-stk6f\" (UniqueName: \"kubernetes.io/projected/65249297-6776-467d-a69e-863e3a9702e4-kube-api-access-stk6f\") pod \"65249297-6776-467d-a69e-863e3a9702e4\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.316668 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-sg-core-conf-yaml\") pod \"65249297-6776-467d-a69e-863e3a9702e4\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.316780 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-combined-ca-bundle\") pod \"65249297-6776-467d-a69e-863e3a9702e4\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.316852 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65249297-6776-467d-a69e-863e3a9702e4-run-httpd\") pod \"65249297-6776-467d-a69e-863e3a9702e4\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.316898 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-scripts\") pod \"65249297-6776-467d-a69e-863e3a9702e4\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.316919 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65249297-6776-467d-a69e-863e3a9702e4-log-httpd\") pod \"65249297-6776-467d-a69e-863e3a9702e4\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.316956 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-config-data\") pod \"65249297-6776-467d-a69e-863e3a9702e4\" (UID: \"65249297-6776-467d-a69e-863e3a9702e4\") " Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.317438 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65249297-6776-467d-a69e-863e3a9702e4-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "65249297-6776-467d-a69e-863e3a9702e4" (UID: "65249297-6776-467d-a69e-863e3a9702e4"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.317863 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65249297-6776-467d-a69e-863e3a9702e4-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "65249297-6776-467d-a69e-863e3a9702e4" (UID: "65249297-6776-467d-a69e-863e3a9702e4"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.319899 4703 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65249297-6776-467d-a69e-863e3a9702e4-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.319933 4703 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65249297-6776-467d-a69e-863e3a9702e4-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.327000 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-scripts" (OuterVolumeSpecName: "scripts") pod "65249297-6776-467d-a69e-863e3a9702e4" (UID: "65249297-6776-467d-a69e-863e3a9702e4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.341609 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65249297-6776-467d-a69e-863e3a9702e4-kube-api-access-stk6f" (OuterVolumeSpecName: "kube-api-access-stk6f") pod "65249297-6776-467d-a69e-863e3a9702e4" (UID: "65249297-6776-467d-a69e-863e3a9702e4"). InnerVolumeSpecName "kube-api-access-stk6f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.374261 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "65249297-6776-467d-a69e-863e3a9702e4" (UID: "65249297-6776-467d-a69e-863e3a9702e4"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.636562 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.636611 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-stk6f\" (UniqueName: \"kubernetes.io/projected/65249297-6776-467d-a69e-863e3a9702e4-kube-api-access-stk6f\") on node \"crc\" DevicePath \"\"" Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.636638 4703 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.671757 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-config-data" (OuterVolumeSpecName: "config-data") pod "65249297-6776-467d-a69e-863e3a9702e4" (UID: "65249297-6776-467d-a69e-863e3a9702e4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.682438 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "65249297-6776-467d-a69e-863e3a9702e4" (UID: "65249297-6776-467d-a69e-863e3a9702e4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.742346 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:22:27 crc kubenswrapper[4703]: I0130 12:22:27.742399 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65249297-6776-467d-a69e-863e3a9702e4-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.180183 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.238985 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.262400 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.283025 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:22:28 crc kubenswrapper[4703]: E0130 12:22:28.283906 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65249297-6776-467d-a69e-863e3a9702e4" containerName="ceilometer-notification-agent" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.283941 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="65249297-6776-467d-a69e-863e3a9702e4" containerName="ceilometer-notification-agent" Jan 30 12:22:28 crc kubenswrapper[4703]: E0130 12:22:28.283962 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65249297-6776-467d-a69e-863e3a9702e4" containerName="sg-core" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.283973 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="65249297-6776-467d-a69e-863e3a9702e4" containerName="sg-core" Jan 30 12:22:28 crc kubenswrapper[4703]: E0130 12:22:28.283995 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65249297-6776-467d-a69e-863e3a9702e4" containerName="proxy-httpd" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.284004 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="65249297-6776-467d-a69e-863e3a9702e4" containerName="proxy-httpd" Jan 30 12:22:28 crc kubenswrapper[4703]: E0130 12:22:28.284037 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65249297-6776-467d-a69e-863e3a9702e4" containerName="ceilometer-central-agent" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.284046 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="65249297-6776-467d-a69e-863e3a9702e4" containerName="ceilometer-central-agent" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.284299 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="65249297-6776-467d-a69e-863e3a9702e4" containerName="sg-core" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.284326 4703 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="65249297-6776-467d-a69e-863e3a9702e4" containerName="ceilometer-notification-agent" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.284342 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="65249297-6776-467d-a69e-863e3a9702e4" containerName="ceilometer-central-agent" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.284351 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="65249297-6776-467d-a69e-863e3a9702e4" containerName="proxy-httpd" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.287301 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.294577 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.299717 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.300031 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.363309 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-scripts\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.363460 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.363520 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/662a3210-b968-437a-90d3-4d5035fc40e4-run-httpd\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.363544 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.363581 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/662a3210-b968-437a-90d3-4d5035fc40e4-log-httpd\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.363662 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6k9kq\" (UniqueName: \"kubernetes.io/projected/662a3210-b968-437a-90d3-4d5035fc40e4-kube-api-access-6k9kq\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.363687 4703 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-config-data\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.465936 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.466035 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/662a3210-b968-437a-90d3-4d5035fc40e4-run-httpd\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.466070 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.466109 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/662a3210-b968-437a-90d3-4d5035fc40e4-log-httpd\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.466157 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6k9kq\" (UniqueName: \"kubernetes.io/projected/662a3210-b968-437a-90d3-4d5035fc40e4-kube-api-access-6k9kq\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.466191 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-config-data\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.466224 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-scripts\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.467065 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/662a3210-b968-437a-90d3-4d5035fc40e4-log-httpd\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.467578 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/662a3210-b968-437a-90d3-4d5035fc40e4-run-httpd\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.475859 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-config-data\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.490150 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.490175 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-scripts\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.492313 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.498108 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6k9kq\" (UniqueName: \"kubernetes.io/projected/662a3210-b968-437a-90d3-4d5035fc40e4-kube-api-access-6k9kq\") pod \"ceilometer-0\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " pod="openstack/ceilometer-0" Jan 30 12:22:28 crc kubenswrapper[4703]: I0130 12:22:28.644321 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:22:29 crc kubenswrapper[4703]: I0130 12:22:29.101479 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65249297-6776-467d-a69e-863e3a9702e4" path="/var/lib/kubelet/pods/65249297-6776-467d-a69e-863e3a9702e4/volumes" Jan 30 12:22:29 crc kubenswrapper[4703]: I0130 12:22:29.931058 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:22:30 crc kubenswrapper[4703]: I0130 12:22:30.506807 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5f9958979d-8h859" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.159:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.159:8443: connect: connection refused" Jan 30 12:22:30 crc kubenswrapper[4703]: I0130 12:22:30.879393 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-77fb4cf9b8-pw692" podUID="9c6d3262-7469-45ac-b5c8-9eb0f9456a5a" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.160:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.160:8443: connect: connection refused" Jan 30 12:22:30 crc kubenswrapper[4703]: I0130 12:22:30.919507 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"662a3210-b968-437a-90d3-4d5035fc40e4","Type":"ContainerStarted","Data":"849f1dd9022e0c5781c822e893a747fdc6b87178a3e7765396f2e1fa79b8eea3"} Jan 30 12:22:30 crc kubenswrapper[4703]: I0130 12:22:30.919587 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"662a3210-b968-437a-90d3-4d5035fc40e4","Type":"ContainerStarted","Data":"626f11ef399c4376c11b38dc819f8ce4747fb8acc13935596949c416aece0d4a"} Jan 30 12:22:33 crc kubenswrapper[4703]: I0130 12:22:33.670375 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"662a3210-b968-437a-90d3-4d5035fc40e4","Type":"ContainerStarted","Data":"2d426baa2f0703e35e59ca5f8a705f192571fd37541e5ab5f163c7216fdd5564"} Jan 30 12:22:34 crc kubenswrapper[4703]: I0130 12:22:34.684334 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"662a3210-b968-437a-90d3-4d5035fc40e4","Type":"ContainerStarted","Data":"9962edfb7ec0957700304dec4c5f84bed4b7d578b9f7df2bee6ed8342aa03742"} Jan 30 12:22:36 crc kubenswrapper[4703]: I0130 12:22:36.174651 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:22:36 crc kubenswrapper[4703]: I0130 12:22:36.712961 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"662a3210-b968-437a-90d3-4d5035fc40e4","Type":"ContainerStarted","Data":"7464820f20d2623adf5c0b4f37bc7205ed570493fcce389ca912f434c7a4de1c"} Jan 30 12:22:36 crc kubenswrapper[4703]: I0130 12:22:36.713255 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="662a3210-b968-437a-90d3-4d5035fc40e4" containerName="ceilometer-central-agent" containerID="cri-o://849f1dd9022e0c5781c822e893a747fdc6b87178a3e7765396f2e1fa79b8eea3" gracePeriod=30 Jan 30 12:22:36 crc kubenswrapper[4703]: I0130 12:22:36.713349 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 30 12:22:36 crc kubenswrapper[4703]: I0130 12:22:36.713402 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="662a3210-b968-437a-90d3-4d5035fc40e4" containerName="ceilometer-notification-agent" containerID="cri-o://2d426baa2f0703e35e59ca5f8a705f192571fd37541e5ab5f163c7216fdd5564" gracePeriod=30 Jan 30 12:22:36 crc kubenswrapper[4703]: I0130 12:22:36.713375 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="662a3210-b968-437a-90d3-4d5035fc40e4" containerName="proxy-httpd" containerID="cri-o://7464820f20d2623adf5c0b4f37bc7205ed570493fcce389ca912f434c7a4de1c" gracePeriod=30 Jan 30 12:22:36 crc kubenswrapper[4703]: I0130 12:22:36.713389 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="662a3210-b968-437a-90d3-4d5035fc40e4" containerName="sg-core" containerID="cri-o://9962edfb7ec0957700304dec4c5f84bed4b7d578b9f7df2bee6ed8342aa03742" gracePeriod=30 Jan 30 12:22:36 crc kubenswrapper[4703]: I0130 12:22:36.754954 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.683454245 podStartE2EDuration="8.754927691s" podCreationTimestamp="2026-01-30 12:22:28 +0000 UTC" firstStartedPulling="2026-01-30 12:22:29.95029615 +0000 UTC m=+1585.728117804" lastFinishedPulling="2026-01-30 12:22:36.021769596 +0000 UTC m=+1591.799591250" observedRunningTime="2026-01-30 12:22:36.752538059 +0000 UTC m=+1592.530359713" watchObservedRunningTime="2026-01-30 12:22:36.754927691 +0000 UTC m=+1592.532749345" Jan 30 12:22:37 crc kubenswrapper[4703]: I0130 12:22:37.729992 4703 generic.go:334] "Generic (PLEG): container finished" podID="662a3210-b968-437a-90d3-4d5035fc40e4" 
containerID="9962edfb7ec0957700304dec4c5f84bed4b7d578b9f7df2bee6ed8342aa03742" exitCode=2 Jan 30 12:22:37 crc kubenswrapper[4703]: I0130 12:22:37.730327 4703 generic.go:334] "Generic (PLEG): container finished" podID="662a3210-b968-437a-90d3-4d5035fc40e4" containerID="2d426baa2f0703e35e59ca5f8a705f192571fd37541e5ab5f163c7216fdd5564" exitCode=0 Jan 30 12:22:37 crc kubenswrapper[4703]: I0130 12:22:37.730359 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"662a3210-b968-437a-90d3-4d5035fc40e4","Type":"ContainerDied","Data":"9962edfb7ec0957700304dec4c5f84bed4b7d578b9f7df2bee6ed8342aa03742"} Jan 30 12:22:37 crc kubenswrapper[4703]: I0130 12:22:37.730420 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"662a3210-b968-437a-90d3-4d5035fc40e4","Type":"ContainerDied","Data":"2d426baa2f0703e35e59ca5f8a705f192571fd37541e5ab5f163c7216fdd5564"} Jan 30 12:22:42 crc kubenswrapper[4703]: I0130 12:22:42.927820 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-2cgdm" event={"ID":"7284929e-736f-4b9e-bce6-b2128abd47fc","Type":"ContainerStarted","Data":"564df5fc426738750afcd0688e8875ff98cb65017665db03b209bc8c51021e9c"} Jan 30 12:22:42 crc kubenswrapper[4703]: I0130 12:22:42.976341 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-2cgdm" podStartSLOduration=1.918205632 podStartE2EDuration="33.976306411s" podCreationTimestamp="2026-01-30 12:22:09 +0000 UTC" firstStartedPulling="2026-01-30 12:22:10.483374106 +0000 UTC m=+1566.261195760" lastFinishedPulling="2026-01-30 12:22:42.541474885 +0000 UTC m=+1598.319296539" observedRunningTime="2026-01-30 12:22:42.956450455 +0000 UTC m=+1598.734272119" watchObservedRunningTime="2026-01-30 12:22:42.976306411 +0000 UTC m=+1598.754128255" Jan 30 12:22:43 crc kubenswrapper[4703]: I0130 12:22:43.927523 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:22:43 crc kubenswrapper[4703]: I0130 12:22:43.928726 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:22:43 crc kubenswrapper[4703]: I0130 12:22:43.953762 4703 generic.go:334] "Generic (PLEG): container finished" podID="662a3210-b968-437a-90d3-4d5035fc40e4" containerID="849f1dd9022e0c5781c822e893a747fdc6b87178a3e7765396f2e1fa79b8eea3" exitCode=0 Jan 30 12:22:43 crc kubenswrapper[4703]: I0130 12:22:43.953836 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"662a3210-b968-437a-90d3-4d5035fc40e4","Type":"ContainerDied","Data":"849f1dd9022e0c5781c822e893a747fdc6b87178a3e7765396f2e1fa79b8eea3"} Jan 30 12:22:46 crc kubenswrapper[4703]: I0130 12:22:46.346341 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:22:47 crc kubenswrapper[4703]: I0130 12:22:47.195824 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-77fb4cf9b8-pw692" Jan 30 12:22:47 crc kubenswrapper[4703]: I0130 12:22:47.266859 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5f9958979d-8h859"] Jan 30 12:22:47 crc kubenswrapper[4703]: I0130 12:22:47.267197 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5f9958979d-8h859" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" 
containerName="horizon-log" containerID="cri-o://515f8dda2bc0f9be8dcdb24b41cdf2299f04b0afc161bd211413cc06590da029" gracePeriod=30 Jan 30 12:22:47 crc kubenswrapper[4703]: I0130 12:22:47.267388 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5f9958979d-8h859" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" containerID="cri-o://12c6e01d1ec7cf675be0982e70d170f25953b74704982af7f66aaded402ddb28" gracePeriod=30 Jan 30 12:22:48 crc kubenswrapper[4703]: I0130 12:22:48.047681 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ctn7s"] Jan 30 12:22:48 crc kubenswrapper[4703]: I0130 12:22:48.050699 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ctn7s" Jan 30 12:22:48 crc kubenswrapper[4703]: I0130 12:22:48.084896 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ctn7s"] Jan 30 12:22:48 crc kubenswrapper[4703]: I0130 12:22:48.144598 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f269a22a-2cfd-45ca-a034-15bfc9ed7841-utilities\") pod \"community-operators-ctn7s\" (UID: \"f269a22a-2cfd-45ca-a034-15bfc9ed7841\") " pod="openshift-marketplace/community-operators-ctn7s" Jan 30 12:22:48 crc kubenswrapper[4703]: I0130 12:22:48.144660 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bz7gc\" (UniqueName: \"kubernetes.io/projected/f269a22a-2cfd-45ca-a034-15bfc9ed7841-kube-api-access-bz7gc\") pod \"community-operators-ctn7s\" (UID: \"f269a22a-2cfd-45ca-a034-15bfc9ed7841\") " pod="openshift-marketplace/community-operators-ctn7s" Jan 30 12:22:48 crc kubenswrapper[4703]: I0130 12:22:48.144719 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f269a22a-2cfd-45ca-a034-15bfc9ed7841-catalog-content\") pod \"community-operators-ctn7s\" (UID: \"f269a22a-2cfd-45ca-a034-15bfc9ed7841\") " pod="openshift-marketplace/community-operators-ctn7s" Jan 30 12:22:48 crc kubenswrapper[4703]: I0130 12:22:48.247011 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f269a22a-2cfd-45ca-a034-15bfc9ed7841-utilities\") pod \"community-operators-ctn7s\" (UID: \"f269a22a-2cfd-45ca-a034-15bfc9ed7841\") " pod="openshift-marketplace/community-operators-ctn7s" Jan 30 12:22:48 crc kubenswrapper[4703]: I0130 12:22:48.247072 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bz7gc\" (UniqueName: \"kubernetes.io/projected/f269a22a-2cfd-45ca-a034-15bfc9ed7841-kube-api-access-bz7gc\") pod \"community-operators-ctn7s\" (UID: \"f269a22a-2cfd-45ca-a034-15bfc9ed7841\") " pod="openshift-marketplace/community-operators-ctn7s" Jan 30 12:22:48 crc kubenswrapper[4703]: I0130 12:22:48.247110 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f269a22a-2cfd-45ca-a034-15bfc9ed7841-catalog-content\") pod \"community-operators-ctn7s\" (UID: \"f269a22a-2cfd-45ca-a034-15bfc9ed7841\") " pod="openshift-marketplace/community-operators-ctn7s" Jan 30 12:22:48 crc kubenswrapper[4703]: I0130 12:22:48.247773 4703 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f269a22a-2cfd-45ca-a034-15bfc9ed7841-catalog-content\") pod \"community-operators-ctn7s\" (UID: \"f269a22a-2cfd-45ca-a034-15bfc9ed7841\") " pod="openshift-marketplace/community-operators-ctn7s" Jan 30 12:22:48 crc kubenswrapper[4703]: I0130 12:22:48.248064 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f269a22a-2cfd-45ca-a034-15bfc9ed7841-utilities\") pod \"community-operators-ctn7s\" (UID: \"f269a22a-2cfd-45ca-a034-15bfc9ed7841\") " pod="openshift-marketplace/community-operators-ctn7s" Jan 30 12:22:48 crc kubenswrapper[4703]: I0130 12:22:48.274816 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bz7gc\" (UniqueName: \"kubernetes.io/projected/f269a22a-2cfd-45ca-a034-15bfc9ed7841-kube-api-access-bz7gc\") pod \"community-operators-ctn7s\" (UID: \"f269a22a-2cfd-45ca-a034-15bfc9ed7841\") " pod="openshift-marketplace/community-operators-ctn7s" Jan 30 12:22:48 crc kubenswrapper[4703]: I0130 12:22:48.383019 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ctn7s" Jan 30 12:22:49 crc kubenswrapper[4703]: I0130 12:22:49.115312 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ctn7s" event={"ID":"f269a22a-2cfd-45ca-a034-15bfc9ed7841","Type":"ContainerStarted","Data":"fe00b035b04cdb607dd0ee7ec4321b583889a9335271555c90e86d4f69b4a791"} Jan 30 12:22:49 crc kubenswrapper[4703]: I0130 12:22:49.115866 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ctn7s"] Jan 30 12:22:50 crc kubenswrapper[4703]: I0130 12:22:50.127842 4703 generic.go:334] "Generic (PLEG): container finished" podID="f269a22a-2cfd-45ca-a034-15bfc9ed7841" containerID="edd860e36790382330f50b74b865128facc0d74f344e8638706f6ee5b6c583a8" exitCode=0 Jan 30 12:22:50 crc kubenswrapper[4703]: I0130 12:22:50.127968 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ctn7s" event={"ID":"f269a22a-2cfd-45ca-a034-15bfc9ed7841","Type":"ContainerDied","Data":"edd860e36790382330f50b74b865128facc0d74f344e8638706f6ee5b6c583a8"} Jan 30 12:22:50 crc kubenswrapper[4703]: I0130 12:22:50.500955 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5f9958979d-8h859" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.159:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.159:8443: connect: connection refused" Jan 30 12:22:51 crc kubenswrapper[4703]: I0130 12:22:51.159386 4703 generic.go:334] "Generic (PLEG): container finished" podID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerID="12c6e01d1ec7cf675be0982e70d170f25953b74704982af7f66aaded402ddb28" exitCode=0 Jan 30 12:22:51 crc kubenswrapper[4703]: I0130 12:22:51.159970 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5f9958979d-8h859" event={"ID":"b888ea51-970d-4f4d-9e5c-f456ca173472","Type":"ContainerDied","Data":"12c6e01d1ec7cf675be0982e70d170f25953b74704982af7f66aaded402ddb28"} Jan 30 12:22:51 crc kubenswrapper[4703]: I0130 12:22:51.160033 4703 scope.go:117] "RemoveContainer" containerID="d757dd822a89b950ce6e9d4dc97199e2572ee94ccf2beca2ecace35453f877f7" Jan 30 12:22:51 crc kubenswrapper[4703]: I0130 12:22:51.246660 4703 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-nzw46"] Jan 30 12:22:51 crc kubenswrapper[4703]: I0130 12:22:51.250973 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nzw46" Jan 30 12:22:51 crc kubenswrapper[4703]: I0130 12:22:51.295657 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nzw46"] Jan 30 12:22:51 crc kubenswrapper[4703]: I0130 12:22:51.353432 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37bff1f1-8dec-4d2e-994b-757b83902321-catalog-content\") pod \"certified-operators-nzw46\" (UID: \"37bff1f1-8dec-4d2e-994b-757b83902321\") " pod="openshift-marketplace/certified-operators-nzw46" Jan 30 12:22:51 crc kubenswrapper[4703]: I0130 12:22:51.353569 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kzkt\" (UniqueName: \"kubernetes.io/projected/37bff1f1-8dec-4d2e-994b-757b83902321-kube-api-access-2kzkt\") pod \"certified-operators-nzw46\" (UID: \"37bff1f1-8dec-4d2e-994b-757b83902321\") " pod="openshift-marketplace/certified-operators-nzw46" Jan 30 12:22:51 crc kubenswrapper[4703]: I0130 12:22:51.353672 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37bff1f1-8dec-4d2e-994b-757b83902321-utilities\") pod \"certified-operators-nzw46\" (UID: \"37bff1f1-8dec-4d2e-994b-757b83902321\") " pod="openshift-marketplace/certified-operators-nzw46" Jan 30 12:22:51 crc kubenswrapper[4703]: I0130 12:22:51.457589 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37bff1f1-8dec-4d2e-994b-757b83902321-catalog-content\") pod \"certified-operators-nzw46\" (UID: \"37bff1f1-8dec-4d2e-994b-757b83902321\") " pod="openshift-marketplace/certified-operators-nzw46" Jan 30 12:22:51 crc kubenswrapper[4703]: I0130 12:22:51.457675 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kzkt\" (UniqueName: \"kubernetes.io/projected/37bff1f1-8dec-4d2e-994b-757b83902321-kube-api-access-2kzkt\") pod \"certified-operators-nzw46\" (UID: \"37bff1f1-8dec-4d2e-994b-757b83902321\") " pod="openshift-marketplace/certified-operators-nzw46" Jan 30 12:22:51 crc kubenswrapper[4703]: I0130 12:22:51.457778 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37bff1f1-8dec-4d2e-994b-757b83902321-utilities\") pod \"certified-operators-nzw46\" (UID: \"37bff1f1-8dec-4d2e-994b-757b83902321\") " pod="openshift-marketplace/certified-operators-nzw46" Jan 30 12:22:51 crc kubenswrapper[4703]: I0130 12:22:51.460559 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37bff1f1-8dec-4d2e-994b-757b83902321-utilities\") pod \"certified-operators-nzw46\" (UID: \"37bff1f1-8dec-4d2e-994b-757b83902321\") " pod="openshift-marketplace/certified-operators-nzw46" Jan 30 12:22:51 crc kubenswrapper[4703]: I0130 12:22:51.467603 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37bff1f1-8dec-4d2e-994b-757b83902321-catalog-content\") pod 
\"certified-operators-nzw46\" (UID: \"37bff1f1-8dec-4d2e-994b-757b83902321\") " pod="openshift-marketplace/certified-operators-nzw46" Jan 30 12:22:51 crc kubenswrapper[4703]: I0130 12:22:51.507257 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kzkt\" (UniqueName: \"kubernetes.io/projected/37bff1f1-8dec-4d2e-994b-757b83902321-kube-api-access-2kzkt\") pod \"certified-operators-nzw46\" (UID: \"37bff1f1-8dec-4d2e-994b-757b83902321\") " pod="openshift-marketplace/certified-operators-nzw46" Jan 30 12:22:51 crc kubenswrapper[4703]: I0130 12:22:51.603639 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nzw46" Jan 30 12:22:52 crc kubenswrapper[4703]: I0130 12:22:52.241736 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ctn7s" event={"ID":"f269a22a-2cfd-45ca-a034-15bfc9ed7841","Type":"ContainerStarted","Data":"8d8f539b0c55bc3039c25a3ba639cb7728a2f7b507d579c9ba29c8fbe83431bf"} Jan 30 12:22:52 crc kubenswrapper[4703]: I0130 12:22:52.300483 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nzw46"] Jan 30 12:22:53 crc kubenswrapper[4703]: I0130 12:22:53.273159 4703 generic.go:334] "Generic (PLEG): container finished" podID="f269a22a-2cfd-45ca-a034-15bfc9ed7841" containerID="8d8f539b0c55bc3039c25a3ba639cb7728a2f7b507d579c9ba29c8fbe83431bf" exitCode=0 Jan 30 12:22:53 crc kubenswrapper[4703]: I0130 12:22:53.273237 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ctn7s" event={"ID":"f269a22a-2cfd-45ca-a034-15bfc9ed7841","Type":"ContainerDied","Data":"8d8f539b0c55bc3039c25a3ba639cb7728a2f7b507d579c9ba29c8fbe83431bf"} Jan 30 12:22:53 crc kubenswrapper[4703]: I0130 12:22:53.276173 4703 generic.go:334] "Generic (PLEG): container finished" podID="37bff1f1-8dec-4d2e-994b-757b83902321" containerID="7b51e23f8924fb06b51551cc64c14485b831369060adc28fdf51d41e18f8a8d1" exitCode=0 Jan 30 12:22:53 crc kubenswrapper[4703]: I0130 12:22:53.276231 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nzw46" event={"ID":"37bff1f1-8dec-4d2e-994b-757b83902321","Type":"ContainerDied","Data":"7b51e23f8924fb06b51551cc64c14485b831369060adc28fdf51d41e18f8a8d1"} Jan 30 12:22:53 crc kubenswrapper[4703]: I0130 12:22:53.276268 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nzw46" event={"ID":"37bff1f1-8dec-4d2e-994b-757b83902321","Type":"ContainerStarted","Data":"cedbd038604eeb1a038a5b44742e4a6da87ad663e441f6899cc378cc82e8d6af"} Jan 30 12:22:54 crc kubenswrapper[4703]: I0130 12:22:54.297611 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nzw46" event={"ID":"37bff1f1-8dec-4d2e-994b-757b83902321","Type":"ContainerStarted","Data":"de812ddbc0922c4e21eeeab53b845677aeb899799cb1ef81a47024dab5c1f64d"} Jan 30 12:22:54 crc kubenswrapper[4703]: I0130 12:22:54.302068 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ctn7s" event={"ID":"f269a22a-2cfd-45ca-a034-15bfc9ed7841","Type":"ContainerStarted","Data":"06fe83faff6e44d3315cdb2fc4f63fe4fba09164ddc33faacb1864aaede9948a"} Jan 30 12:22:54 crc kubenswrapper[4703]: I0130 12:22:54.347573 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ctn7s" 
podStartSLOduration=2.798952528 podStartE2EDuration="6.347546807s" podCreationTimestamp="2026-01-30 12:22:48 +0000 UTC" firstStartedPulling="2026-01-30 12:22:50.13038242 +0000 UTC m=+1605.908204074" lastFinishedPulling="2026-01-30 12:22:53.678976699 +0000 UTC m=+1609.456798353" observedRunningTime="2026-01-30 12:22:54.342800123 +0000 UTC m=+1610.120621777" watchObservedRunningTime="2026-01-30 12:22:54.347546807 +0000 UTC m=+1610.125368461" Jan 30 12:22:55 crc kubenswrapper[4703]: I0130 12:22:55.320676 4703 generic.go:334] "Generic (PLEG): container finished" podID="37bff1f1-8dec-4d2e-994b-757b83902321" containerID="de812ddbc0922c4e21eeeab53b845677aeb899799cb1ef81a47024dab5c1f64d" exitCode=0 Jan 30 12:22:55 crc kubenswrapper[4703]: I0130 12:22:55.322575 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nzw46" event={"ID":"37bff1f1-8dec-4d2e-994b-757b83902321","Type":"ContainerDied","Data":"de812ddbc0922c4e21eeeab53b845677aeb899799cb1ef81a47024dab5c1f64d"} Jan 30 12:22:58 crc kubenswrapper[4703]: I0130 12:22:58.460995 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ctn7s" Jan 30 12:22:58 crc kubenswrapper[4703]: I0130 12:22:58.497502 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-ctn7s" Jan 30 12:22:58 crc kubenswrapper[4703]: I0130 12:22:58.651720 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="662a3210-b968-437a-90d3-4d5035fc40e4" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 30 12:22:58 crc kubenswrapper[4703]: I0130 12:22:58.665172 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ctn7s" Jan 30 12:22:59 crc kubenswrapper[4703]: I0130 12:22:59.533612 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nzw46" event={"ID":"37bff1f1-8dec-4d2e-994b-757b83902321","Type":"ContainerStarted","Data":"3fbb404760851f02a2e260bf1a744dc574969285804230188133d5deacd6d7f2"} Jan 30 12:22:59 crc kubenswrapper[4703]: I0130 12:22:59.576372 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-nzw46" podStartSLOduration=4.094652664 podStartE2EDuration="8.576344243s" podCreationTimestamp="2026-01-30 12:22:51 +0000 UTC" firstStartedPulling="2026-01-30 12:22:53.27747825 +0000 UTC m=+1609.055299904" lastFinishedPulling="2026-01-30 12:22:57.759169829 +0000 UTC m=+1613.536991483" observedRunningTime="2026-01-30 12:22:59.567704549 +0000 UTC m=+1615.345526213" watchObservedRunningTime="2026-01-30 12:22:59.576344243 +0000 UTC m=+1615.354165897" Jan 30 12:22:59 crc kubenswrapper[4703]: I0130 12:22:59.592309 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ctn7s" Jan 30 12:23:00 crc kubenswrapper[4703]: I0130 12:23:00.501394 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5f9958979d-8h859" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.159:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.159:8443: connect: connection refused" Jan 30 12:23:01 crc kubenswrapper[4703]: I0130 12:23:01.922240 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-nzw46" Jan 30 12:23:01 crc kubenswrapper[4703]: I0130 12:23:01.923000 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-nzw46" Jan 30 12:23:01 crc kubenswrapper[4703]: I0130 12:23:01.980038 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ctn7s"] Jan 30 12:23:01 crc kubenswrapper[4703]: I0130 12:23:01.980345 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ctn7s" podUID="f269a22a-2cfd-45ca-a034-15bfc9ed7841" containerName="registry-server" containerID="cri-o://06fe83faff6e44d3315cdb2fc4f63fe4fba09164ddc33faacb1864aaede9948a" gracePeriod=2 Jan 30 12:23:02 crc kubenswrapper[4703]: I0130 12:23:02.008690 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-nzw46" Jan 30 12:23:02 crc kubenswrapper[4703]: E0130 12:23:02.331666 4703 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf269a22a_2cfd_45ca_a034_15bfc9ed7841.slice/crio-06fe83faff6e44d3315cdb2fc4f63fe4fba09164ddc33faacb1864aaede9948a.scope\": RecentStats: unable to find data in memory cache]" Jan 30 12:23:02 crc kubenswrapper[4703]: I0130 12:23:02.571163 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ctn7s" Jan 30 12:23:02 crc kubenswrapper[4703]: I0130 12:23:02.662672 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f269a22a-2cfd-45ca-a034-15bfc9ed7841-catalog-content\") pod \"f269a22a-2cfd-45ca-a034-15bfc9ed7841\" (UID: \"f269a22a-2cfd-45ca-a034-15bfc9ed7841\") " Jan 30 12:23:02 crc kubenswrapper[4703]: I0130 12:23:02.662789 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bz7gc\" (UniqueName: \"kubernetes.io/projected/f269a22a-2cfd-45ca-a034-15bfc9ed7841-kube-api-access-bz7gc\") pod \"f269a22a-2cfd-45ca-a034-15bfc9ed7841\" (UID: \"f269a22a-2cfd-45ca-a034-15bfc9ed7841\") " Jan 30 12:23:02 crc kubenswrapper[4703]: I0130 12:23:02.663038 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f269a22a-2cfd-45ca-a034-15bfc9ed7841-utilities\") pod \"f269a22a-2cfd-45ca-a034-15bfc9ed7841\" (UID: \"f269a22a-2cfd-45ca-a034-15bfc9ed7841\") " Jan 30 12:23:02 crc kubenswrapper[4703]: I0130 12:23:02.664567 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f269a22a-2cfd-45ca-a034-15bfc9ed7841-utilities" (OuterVolumeSpecName: "utilities") pod "f269a22a-2cfd-45ca-a034-15bfc9ed7841" (UID: "f269a22a-2cfd-45ca-a034-15bfc9ed7841"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:23:02 crc kubenswrapper[4703]: I0130 12:23:02.715678 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f269a22a-2cfd-45ca-a034-15bfc9ed7841-kube-api-access-bz7gc" (OuterVolumeSpecName: "kube-api-access-bz7gc") pod "f269a22a-2cfd-45ca-a034-15bfc9ed7841" (UID: "f269a22a-2cfd-45ca-a034-15bfc9ed7841"). InnerVolumeSpecName "kube-api-access-bz7gc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:23:02 crc kubenswrapper[4703]: I0130 12:23:02.776326 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bz7gc\" (UniqueName: \"kubernetes.io/projected/f269a22a-2cfd-45ca-a034-15bfc9ed7841-kube-api-access-bz7gc\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:02 crc kubenswrapper[4703]: I0130 12:23:02.776383 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f269a22a-2cfd-45ca-a034-15bfc9ed7841-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:02 crc kubenswrapper[4703]: I0130 12:23:02.782229 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f269a22a-2cfd-45ca-a034-15bfc9ed7841-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f269a22a-2cfd-45ca-a034-15bfc9ed7841" (UID: "f269a22a-2cfd-45ca-a034-15bfc9ed7841"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:23:02 crc kubenswrapper[4703]: I0130 12:23:02.879220 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f269a22a-2cfd-45ca-a034-15bfc9ed7841-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:02 crc kubenswrapper[4703]: I0130 12:23:02.963233 4703 generic.go:334] "Generic (PLEG): container finished" podID="f269a22a-2cfd-45ca-a034-15bfc9ed7841" containerID="06fe83faff6e44d3315cdb2fc4f63fe4fba09164ddc33faacb1864aaede9948a" exitCode=0 Jan 30 12:23:02 crc kubenswrapper[4703]: I0130 12:23:02.963316 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ctn7s" Jan 30 12:23:02 crc kubenswrapper[4703]: I0130 12:23:02.963340 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ctn7s" event={"ID":"f269a22a-2cfd-45ca-a034-15bfc9ed7841","Type":"ContainerDied","Data":"06fe83faff6e44d3315cdb2fc4f63fe4fba09164ddc33faacb1864aaede9948a"} Jan 30 12:23:02 crc kubenswrapper[4703]: I0130 12:23:02.963599 4703 scope.go:117] "RemoveContainer" containerID="06fe83faff6e44d3315cdb2fc4f63fe4fba09164ddc33faacb1864aaede9948a" Jan 30 12:23:02 crc kubenswrapper[4703]: I0130 12:23:02.963693 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ctn7s" event={"ID":"f269a22a-2cfd-45ca-a034-15bfc9ed7841","Type":"ContainerDied","Data":"fe00b035b04cdb607dd0ee7ec4321b583889a9335271555c90e86d4f69b4a791"} Jan 30 12:23:02 crc kubenswrapper[4703]: I0130 12:23:02.996993 4703 scope.go:117] "RemoveContainer" containerID="8d8f539b0c55bc3039c25a3ba639cb7728a2f7b507d579c9ba29c8fbe83431bf" Jan 30 12:23:03 crc kubenswrapper[4703]: I0130 12:23:03.007184 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ctn7s"] Jan 30 12:23:03 crc kubenswrapper[4703]: I0130 12:23:03.044417 4703 scope.go:117] "RemoveContainer" containerID="edd860e36790382330f50b74b865128facc0d74f344e8638706f6ee5b6c583a8" Jan 30 12:23:03 crc kubenswrapper[4703]: I0130 12:23:03.055338 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ctn7s"] Jan 30 12:23:03 crc kubenswrapper[4703]: I0130 12:23:03.093514 4703 scope.go:117] "RemoveContainer" containerID="06fe83faff6e44d3315cdb2fc4f63fe4fba09164ddc33faacb1864aaede9948a" Jan 30 12:23:03 crc kubenswrapper[4703]: E0130 12:23:03.094088 4703 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06fe83faff6e44d3315cdb2fc4f63fe4fba09164ddc33faacb1864aaede9948a\": container with ID starting with 06fe83faff6e44d3315cdb2fc4f63fe4fba09164ddc33faacb1864aaede9948a not found: ID does not exist" containerID="06fe83faff6e44d3315cdb2fc4f63fe4fba09164ddc33faacb1864aaede9948a" Jan 30 12:23:03 crc kubenswrapper[4703]: I0130 12:23:03.094172 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06fe83faff6e44d3315cdb2fc4f63fe4fba09164ddc33faacb1864aaede9948a"} err="failed to get container status \"06fe83faff6e44d3315cdb2fc4f63fe4fba09164ddc33faacb1864aaede9948a\": rpc error: code = NotFound desc = could not find container \"06fe83faff6e44d3315cdb2fc4f63fe4fba09164ddc33faacb1864aaede9948a\": container with ID starting with 06fe83faff6e44d3315cdb2fc4f63fe4fba09164ddc33faacb1864aaede9948a not found: ID does not exist" Jan 30 12:23:03 crc kubenswrapper[4703]: I0130 12:23:03.094214 4703 scope.go:117] "RemoveContainer" containerID="8d8f539b0c55bc3039c25a3ba639cb7728a2f7b507d579c9ba29c8fbe83431bf" Jan 30 12:23:03 crc kubenswrapper[4703]: E0130 12:23:03.095334 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d8f539b0c55bc3039c25a3ba639cb7728a2f7b507d579c9ba29c8fbe83431bf\": container with ID starting with 8d8f539b0c55bc3039c25a3ba639cb7728a2f7b507d579c9ba29c8fbe83431bf not found: ID does not exist" containerID="8d8f539b0c55bc3039c25a3ba639cb7728a2f7b507d579c9ba29c8fbe83431bf" Jan 30 12:23:03 crc kubenswrapper[4703]: I0130 12:23:03.095393 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d8f539b0c55bc3039c25a3ba639cb7728a2f7b507d579c9ba29c8fbe83431bf"} err="failed to get container status \"8d8f539b0c55bc3039c25a3ba639cb7728a2f7b507d579c9ba29c8fbe83431bf\": rpc error: code = NotFound desc = could not find container \"8d8f539b0c55bc3039c25a3ba639cb7728a2f7b507d579c9ba29c8fbe83431bf\": container with ID starting with 8d8f539b0c55bc3039c25a3ba639cb7728a2f7b507d579c9ba29c8fbe83431bf not found: ID does not exist" Jan 30 12:23:03 crc kubenswrapper[4703]: I0130 12:23:03.095431 4703 scope.go:117] "RemoveContainer" containerID="edd860e36790382330f50b74b865128facc0d74f344e8638706f6ee5b6c583a8" Jan 30 12:23:03 crc kubenswrapper[4703]: E0130 12:23:03.096687 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edd860e36790382330f50b74b865128facc0d74f344e8638706f6ee5b6c583a8\": container with ID starting with edd860e36790382330f50b74b865128facc0d74f344e8638706f6ee5b6c583a8 not found: ID does not exist" containerID="edd860e36790382330f50b74b865128facc0d74f344e8638706f6ee5b6c583a8" Jan 30 12:23:03 crc kubenswrapper[4703]: I0130 12:23:03.096729 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edd860e36790382330f50b74b865128facc0d74f344e8638706f6ee5b6c583a8"} err="failed to get container status \"edd860e36790382330f50b74b865128facc0d74f344e8638706f6ee5b6c583a8\": rpc error: code = NotFound desc = could not find container \"edd860e36790382330f50b74b865128facc0d74f344e8638706f6ee5b6c583a8\": container with ID starting with edd860e36790382330f50b74b865128facc0d74f344e8638706f6ee5b6c583a8 not found: ID does not exist" Jan 30 12:23:03 crc kubenswrapper[4703]: I0130 12:23:03.103157 4703 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="f269a22a-2cfd-45ca-a034-15bfc9ed7841" path="/var/lib/kubelet/pods/f269a22a-2cfd-45ca-a034-15bfc9ed7841/volumes" Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.119678 4703 generic.go:334] "Generic (PLEG): container finished" podID="662a3210-b968-437a-90d3-4d5035fc40e4" containerID="7464820f20d2623adf5c0b4f37bc7205ed570493fcce389ca912f434c7a4de1c" exitCode=137 Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.119768 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"662a3210-b968-437a-90d3-4d5035fc40e4","Type":"ContainerDied","Data":"7464820f20d2623adf5c0b4f37bc7205ed570493fcce389ca912f434c7a4de1c"} Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.257307 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.419225 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6k9kq\" (UniqueName: \"kubernetes.io/projected/662a3210-b968-437a-90d3-4d5035fc40e4-kube-api-access-6k9kq\") pod \"662a3210-b968-437a-90d3-4d5035fc40e4\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.419796 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/662a3210-b968-437a-90d3-4d5035fc40e4-log-httpd\") pod \"662a3210-b968-437a-90d3-4d5035fc40e4\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.420060 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-scripts\") pod \"662a3210-b968-437a-90d3-4d5035fc40e4\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.420450 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/662a3210-b968-437a-90d3-4d5035fc40e4-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "662a3210-b968-437a-90d3-4d5035fc40e4" (UID: "662a3210-b968-437a-90d3-4d5035fc40e4"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.421030 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-config-data\") pod \"662a3210-b968-437a-90d3-4d5035fc40e4\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.421183 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-sg-core-conf-yaml\") pod \"662a3210-b968-437a-90d3-4d5035fc40e4\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.421386 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/662a3210-b968-437a-90d3-4d5035fc40e4-run-httpd\") pod \"662a3210-b968-437a-90d3-4d5035fc40e4\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.421533 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-combined-ca-bundle\") pod \"662a3210-b968-437a-90d3-4d5035fc40e4\" (UID: \"662a3210-b968-437a-90d3-4d5035fc40e4\") " Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.422113 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/662a3210-b968-437a-90d3-4d5035fc40e4-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "662a3210-b968-437a-90d3-4d5035fc40e4" (UID: "662a3210-b968-437a-90d3-4d5035fc40e4"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.422791 4703 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/662a3210-b968-437a-90d3-4d5035fc40e4-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.431640 4703 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/662a3210-b968-437a-90d3-4d5035fc40e4-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.446517 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/662a3210-b968-437a-90d3-4d5035fc40e4-kube-api-access-6k9kq" (OuterVolumeSpecName: "kube-api-access-6k9kq") pod "662a3210-b968-437a-90d3-4d5035fc40e4" (UID: "662a3210-b968-437a-90d3-4d5035fc40e4"). InnerVolumeSpecName "kube-api-access-6k9kq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.453042 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-scripts" (OuterVolumeSpecName: "scripts") pod "662a3210-b968-437a-90d3-4d5035fc40e4" (UID: "662a3210-b968-437a-90d3-4d5035fc40e4"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.503646 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "662a3210-b968-437a-90d3-4d5035fc40e4" (UID: "662a3210-b968-437a-90d3-4d5035fc40e4"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.534148 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.534245 4703 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.534266 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6k9kq\" (UniqueName: \"kubernetes.io/projected/662a3210-b968-437a-90d3-4d5035fc40e4-kube-api-access-6k9kq\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.574330 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-config-data" (OuterVolumeSpecName: "config-data") pod "662a3210-b968-437a-90d3-4d5035fc40e4" (UID: "662a3210-b968-437a-90d3-4d5035fc40e4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.581879 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "662a3210-b968-437a-90d3-4d5035fc40e4" (UID: "662a3210-b968-437a-90d3-4d5035fc40e4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.635399 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:07 crc kubenswrapper[4703]: I0130 12:23:07.635444 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/662a3210-b968-437a-90d3-4d5035fc40e4-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.136572 4703 generic.go:334] "Generic (PLEG): container finished" podID="7284929e-736f-4b9e-bce6-b2128abd47fc" containerID="564df5fc426738750afcd0688e8875ff98cb65017665db03b209bc8c51021e9c" exitCode=0 Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.136672 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-2cgdm" event={"ID":"7284929e-736f-4b9e-bce6-b2128abd47fc","Type":"ContainerDied","Data":"564df5fc426738750afcd0688e8875ff98cb65017665db03b209bc8c51021e9c"} Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.143206 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"662a3210-b968-437a-90d3-4d5035fc40e4","Type":"ContainerDied","Data":"626f11ef399c4376c11b38dc819f8ce4747fb8acc13935596949c416aece0d4a"} Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.143294 4703 scope.go:117] "RemoveContainer" containerID="7464820f20d2623adf5c0b4f37bc7205ed570493fcce389ca912f434c7a4de1c" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.143514 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.206494 4703 scope.go:117] "RemoveContainer" containerID="9962edfb7ec0957700304dec4c5f84bed4b7d578b9f7df2bee6ed8342aa03742" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.208460 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.220920 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.272995 4703 scope.go:117] "RemoveContainer" containerID="2d426baa2f0703e35e59ca5f8a705f192571fd37541e5ab5f163c7216fdd5564" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.275521 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:23:08 crc kubenswrapper[4703]: E0130 12:23:08.276247 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="662a3210-b968-437a-90d3-4d5035fc40e4" containerName="ceilometer-notification-agent" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.276275 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="662a3210-b968-437a-90d3-4d5035fc40e4" containerName="ceilometer-notification-agent" Jan 30 12:23:08 crc kubenswrapper[4703]: E0130 12:23:08.276294 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f269a22a-2cfd-45ca-a034-15bfc9ed7841" containerName="registry-server" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.276309 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f269a22a-2cfd-45ca-a034-15bfc9ed7841" containerName="registry-server" Jan 30 12:23:08 crc kubenswrapper[4703]: E0130 12:23:08.276334 4703 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="662a3210-b968-437a-90d3-4d5035fc40e4" containerName="ceilometer-central-agent" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.276344 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="662a3210-b968-437a-90d3-4d5035fc40e4" containerName="ceilometer-central-agent" Jan 30 12:23:08 crc kubenswrapper[4703]: E0130 12:23:08.276373 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="662a3210-b968-437a-90d3-4d5035fc40e4" containerName="proxy-httpd" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.276383 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="662a3210-b968-437a-90d3-4d5035fc40e4" containerName="proxy-httpd" Jan 30 12:23:08 crc kubenswrapper[4703]: E0130 12:23:08.276396 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f269a22a-2cfd-45ca-a034-15bfc9ed7841" containerName="extract-utilities" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.276412 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f269a22a-2cfd-45ca-a034-15bfc9ed7841" containerName="extract-utilities" Jan 30 12:23:08 crc kubenswrapper[4703]: E0130 12:23:08.276425 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="662a3210-b968-437a-90d3-4d5035fc40e4" containerName="sg-core" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.276434 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="662a3210-b968-437a-90d3-4d5035fc40e4" containerName="sg-core" Jan 30 12:23:08 crc kubenswrapper[4703]: E0130 12:23:08.276446 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f269a22a-2cfd-45ca-a034-15bfc9ed7841" containerName="extract-content" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.276453 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f269a22a-2cfd-45ca-a034-15bfc9ed7841" containerName="extract-content" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.276746 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="662a3210-b968-437a-90d3-4d5035fc40e4" containerName="ceilometer-notification-agent" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.276770 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="662a3210-b968-437a-90d3-4d5035fc40e4" containerName="proxy-httpd" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.276783 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="f269a22a-2cfd-45ca-a034-15bfc9ed7841" containerName="registry-server" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.276795 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="662a3210-b968-437a-90d3-4d5035fc40e4" containerName="ceilometer-central-agent" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.276812 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="662a3210-b968-437a-90d3-4d5035fc40e4" containerName="sg-core" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.282495 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.285075 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.285432 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.290292 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.337822 4703 scope.go:117] "RemoveContainer" containerID="849f1dd9022e0c5781c822e893a747fdc6b87178a3e7765396f2e1fa79b8eea3" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.351009 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-run-httpd\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.351175 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-config-data\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.351425 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdhp7\" (UniqueName: \"kubernetes.io/projected/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-kube-api-access-mdhp7\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.351604 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.351674 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.351834 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-scripts\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.352059 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-log-httpd\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.454924 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdhp7\" (UniqueName: 
\"kubernetes.io/projected/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-kube-api-access-mdhp7\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.455041 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.455081 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.455147 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-scripts\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.455215 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-log-httpd\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.455261 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-run-httpd\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.455324 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-config-data\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.456288 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-log-httpd\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.456309 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-run-httpd\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.462606 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.462872 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-scripts\") 
pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.463197 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.464238 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-config-data\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.488459 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdhp7\" (UniqueName: \"kubernetes.io/projected/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-kube-api-access-mdhp7\") pod \"ceilometer-0\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " pod="openstack/ceilometer-0" Jan 30 12:23:08 crc kubenswrapper[4703]: I0130 12:23:08.617895 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:23:09 crc kubenswrapper[4703]: I0130 12:23:09.101049 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="662a3210-b968-437a-90d3-4d5035fc40e4" path="/var/lib/kubelet/pods/662a3210-b968-437a-90d3-4d5035fc40e4/volumes" Jan 30 12:23:09 crc kubenswrapper[4703]: I0130 12:23:09.183873 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:23:09 crc kubenswrapper[4703]: I0130 12:23:09.603282 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-2cgdm" Jan 30 12:23:09 crc kubenswrapper[4703]: I0130 12:23:09.788730 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7284929e-736f-4b9e-bce6-b2128abd47fc-config-data\") pod \"7284929e-736f-4b9e-bce6-b2128abd47fc\" (UID: \"7284929e-736f-4b9e-bce6-b2128abd47fc\") " Jan 30 12:23:09 crc kubenswrapper[4703]: I0130 12:23:09.789241 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7lzwm\" (UniqueName: \"kubernetes.io/projected/7284929e-736f-4b9e-bce6-b2128abd47fc-kube-api-access-7lzwm\") pod \"7284929e-736f-4b9e-bce6-b2128abd47fc\" (UID: \"7284929e-736f-4b9e-bce6-b2128abd47fc\") " Jan 30 12:23:09 crc kubenswrapper[4703]: I0130 12:23:09.789294 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7284929e-736f-4b9e-bce6-b2128abd47fc-scripts\") pod \"7284929e-736f-4b9e-bce6-b2128abd47fc\" (UID: \"7284929e-736f-4b9e-bce6-b2128abd47fc\") " Jan 30 12:23:09 crc kubenswrapper[4703]: I0130 12:23:09.789384 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7284929e-736f-4b9e-bce6-b2128abd47fc-combined-ca-bundle\") pod \"7284929e-736f-4b9e-bce6-b2128abd47fc\" (UID: \"7284929e-736f-4b9e-bce6-b2128abd47fc\") " Jan 30 12:23:09 crc kubenswrapper[4703]: I0130 12:23:09.796145 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7284929e-736f-4b9e-bce6-b2128abd47fc-kube-api-access-7lzwm" (OuterVolumeSpecName: "kube-api-access-7lzwm") pod "7284929e-736f-4b9e-bce6-b2128abd47fc" (UID: "7284929e-736f-4b9e-bce6-b2128abd47fc"). InnerVolumeSpecName "kube-api-access-7lzwm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:23:09 crc kubenswrapper[4703]: I0130 12:23:09.804612 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7284929e-736f-4b9e-bce6-b2128abd47fc-scripts" (OuterVolumeSpecName: "scripts") pod "7284929e-736f-4b9e-bce6-b2128abd47fc" (UID: "7284929e-736f-4b9e-bce6-b2128abd47fc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:09 crc kubenswrapper[4703]: I0130 12:23:09.821739 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7284929e-736f-4b9e-bce6-b2128abd47fc-config-data" (OuterVolumeSpecName: "config-data") pod "7284929e-736f-4b9e-bce6-b2128abd47fc" (UID: "7284929e-736f-4b9e-bce6-b2128abd47fc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:09 crc kubenswrapper[4703]: I0130 12:23:09.826978 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7284929e-736f-4b9e-bce6-b2128abd47fc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7284929e-736f-4b9e-bce6-b2128abd47fc" (UID: "7284929e-736f-4b9e-bce6-b2128abd47fc"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:09 crc kubenswrapper[4703]: I0130 12:23:09.891471 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7284929e-736f-4b9e-bce6-b2128abd47fc-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:09 crc kubenswrapper[4703]: I0130 12:23:09.891532 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7lzwm\" (UniqueName: \"kubernetes.io/projected/7284929e-736f-4b9e-bce6-b2128abd47fc-kube-api-access-7lzwm\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:09 crc kubenswrapper[4703]: I0130 12:23:09.891546 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7284929e-736f-4b9e-bce6-b2128abd47fc-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:09 crc kubenswrapper[4703]: I0130 12:23:09.891558 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7284929e-736f-4b9e-bce6-b2128abd47fc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.170985 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef","Type":"ContainerStarted","Data":"e1e147ef7809d57d14a7fe8bec657869126318174307d825b49360370c642912"} Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.173651 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-2cgdm" event={"ID":"7284929e-736f-4b9e-bce6-b2128abd47fc","Type":"ContainerDied","Data":"6ffe5bc7311e3a14f6ce85cbf82272c24c763a1ed0e756fe7291b0c63669ff0b"} Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.173720 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6ffe5bc7311e3a14f6ce85cbf82272c24c763a1ed0e756fe7291b0c63669ff0b" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.173797 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-2cgdm" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.307247 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 30 12:23:10 crc kubenswrapper[4703]: E0130 12:23:10.307893 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7284929e-736f-4b9e-bce6-b2128abd47fc" containerName="nova-cell0-conductor-db-sync" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.307917 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="7284929e-736f-4b9e-bce6-b2128abd47fc" containerName="nova-cell0-conductor-db-sync" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.308225 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="7284929e-736f-4b9e-bce6-b2128abd47fc" containerName="nova-cell0-conductor-db-sync" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.309317 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.312451 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-ngdwl" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.312525 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.319836 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.404810 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e48a9fe8-36bf-41e5-920e-116bb6237828-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"e48a9fe8-36bf-41e5-920e-116bb6237828\") " pod="openstack/nova-cell0-conductor-0" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.404927 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e48a9fe8-36bf-41e5-920e-116bb6237828-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"e48a9fe8-36bf-41e5-920e-116bb6237828\") " pod="openstack/nova-cell0-conductor-0" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.404994 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2w7w\" (UniqueName: \"kubernetes.io/projected/e48a9fe8-36bf-41e5-920e-116bb6237828-kube-api-access-b2w7w\") pod \"nova-cell0-conductor-0\" (UID: \"e48a9fe8-36bf-41e5-920e-116bb6237828\") " pod="openstack/nova-cell0-conductor-0" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.501954 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5f9958979d-8h859" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.159:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.159:8443: connect: connection refused" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.502566 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.507890 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e48a9fe8-36bf-41e5-920e-116bb6237828-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"e48a9fe8-36bf-41e5-920e-116bb6237828\") " pod="openstack/nova-cell0-conductor-0" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.508029 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e48a9fe8-36bf-41e5-920e-116bb6237828-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"e48a9fe8-36bf-41e5-920e-116bb6237828\") " pod="openstack/nova-cell0-conductor-0" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.508111 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2w7w\" (UniqueName: \"kubernetes.io/projected/e48a9fe8-36bf-41e5-920e-116bb6237828-kube-api-access-b2w7w\") pod \"nova-cell0-conductor-0\" (UID: \"e48a9fe8-36bf-41e5-920e-116bb6237828\") " pod="openstack/nova-cell0-conductor-0" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.514478 4703 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e48a9fe8-36bf-41e5-920e-116bb6237828-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"e48a9fe8-36bf-41e5-920e-116bb6237828\") " pod="openstack/nova-cell0-conductor-0" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.515031 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e48a9fe8-36bf-41e5-920e-116bb6237828-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"e48a9fe8-36bf-41e5-920e-116bb6237828\") " pod="openstack/nova-cell0-conductor-0" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.537284 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2w7w\" (UniqueName: \"kubernetes.io/projected/e48a9fe8-36bf-41e5-920e-116bb6237828-kube-api-access-b2w7w\") pod \"nova-cell0-conductor-0\" (UID: \"e48a9fe8-36bf-41e5-920e-116bb6237828\") " pod="openstack/nova-cell0-conductor-0" Jan 30 12:23:10 crc kubenswrapper[4703]: I0130 12:23:10.634573 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 30 12:23:11 crc kubenswrapper[4703]: I0130 12:23:11.384861 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 30 12:23:11 crc kubenswrapper[4703]: W0130 12:23:11.388818 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode48a9fe8_36bf_41e5_920e_116bb6237828.slice/crio-28caa2848f8bae3cdbcd77a5856443fbd33555f9ff2675364d4852a918678513 WatchSource:0}: Error finding container 28caa2848f8bae3cdbcd77a5856443fbd33555f9ff2675364d4852a918678513: Status 404 returned error can't find the container with id 28caa2848f8bae3cdbcd77a5856443fbd33555f9ff2675364d4852a918678513 Jan 30 12:23:11 crc kubenswrapper[4703]: I0130 12:23:11.680786 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-nzw46" Jan 30 12:23:11 crc kubenswrapper[4703]: I0130 12:23:11.745476 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nzw46"] Jan 30 12:23:12 crc kubenswrapper[4703]: I0130 12:23:12.199423 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"e48a9fe8-36bf-41e5-920e-116bb6237828","Type":"ContainerStarted","Data":"fedd8982cefab7128752e82a8ecf80a8455616ee1f62d271b872e15b78acf6e8"} Jan 30 12:23:12 crc kubenswrapper[4703]: I0130 12:23:12.199507 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"e48a9fe8-36bf-41e5-920e-116bb6237828","Type":"ContainerStarted","Data":"28caa2848f8bae3cdbcd77a5856443fbd33555f9ff2675364d4852a918678513"} Jan 30 12:23:12 crc kubenswrapper[4703]: I0130 12:23:12.199556 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Jan 30 12:23:12 crc kubenswrapper[4703]: I0130 12:23:12.202085 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef","Type":"ContainerStarted","Data":"36966515af4078e2e076520f849000cfa7ae40e1a6cf9753411648a7299761b6"} Jan 30 12:23:12 crc kubenswrapper[4703]: I0130 12:23:12.202545 4703 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/certified-operators-nzw46" podUID="37bff1f1-8dec-4d2e-994b-757b83902321" containerName="registry-server" containerID="cri-o://3fbb404760851f02a2e260bf1a744dc574969285804230188133d5deacd6d7f2" gracePeriod=2 Jan 30 12:23:12 crc kubenswrapper[4703]: I0130 12:23:12.222751 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.222721951 podStartE2EDuration="2.222721951s" podCreationTimestamp="2026-01-30 12:23:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:23:12.222082315 +0000 UTC m=+1627.999903969" watchObservedRunningTime="2026-01-30 12:23:12.222721951 +0000 UTC m=+1628.000543605" Jan 30 12:23:12 crc kubenswrapper[4703]: E0130 12:23:12.791131 4703 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37bff1f1_8dec_4d2e_994b_757b83902321.slice/crio-conmon-3fbb404760851f02a2e260bf1a744dc574969285804230188133d5deacd6d7f2.scope\": RecentStats: unable to find data in memory cache]" Jan 30 12:23:13 crc kubenswrapper[4703]: I0130 12:23:13.220453 4703 generic.go:334] "Generic (PLEG): container finished" podID="37bff1f1-8dec-4d2e-994b-757b83902321" containerID="3fbb404760851f02a2e260bf1a744dc574969285804230188133d5deacd6d7f2" exitCode=0 Jan 30 12:23:13 crc kubenswrapper[4703]: I0130 12:23:13.220560 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nzw46" event={"ID":"37bff1f1-8dec-4d2e-994b-757b83902321","Type":"ContainerDied","Data":"3fbb404760851f02a2e260bf1a744dc574969285804230188133d5deacd6d7f2"} Jan 30 12:23:13 crc kubenswrapper[4703]: I0130 12:23:13.602630 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nzw46" Jan 30 12:23:13 crc kubenswrapper[4703]: I0130 12:23:13.701246 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37bff1f1-8dec-4d2e-994b-757b83902321-utilities\") pod \"37bff1f1-8dec-4d2e-994b-757b83902321\" (UID: \"37bff1f1-8dec-4d2e-994b-757b83902321\") " Jan 30 12:23:13 crc kubenswrapper[4703]: I0130 12:23:13.702105 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37bff1f1-8dec-4d2e-994b-757b83902321-catalog-content\") pod \"37bff1f1-8dec-4d2e-994b-757b83902321\" (UID: \"37bff1f1-8dec-4d2e-994b-757b83902321\") " Jan 30 12:23:13 crc kubenswrapper[4703]: I0130 12:23:13.702364 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2kzkt\" (UniqueName: \"kubernetes.io/projected/37bff1f1-8dec-4d2e-994b-757b83902321-kube-api-access-2kzkt\") pod \"37bff1f1-8dec-4d2e-994b-757b83902321\" (UID: \"37bff1f1-8dec-4d2e-994b-757b83902321\") " Jan 30 12:23:13 crc kubenswrapper[4703]: I0130 12:23:13.702567 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37bff1f1-8dec-4d2e-994b-757b83902321-utilities" (OuterVolumeSpecName: "utilities") pod "37bff1f1-8dec-4d2e-994b-757b83902321" (UID: "37bff1f1-8dec-4d2e-994b-757b83902321"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:23:13 crc kubenswrapper[4703]: I0130 12:23:13.703140 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37bff1f1-8dec-4d2e-994b-757b83902321-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:13 crc kubenswrapper[4703]: I0130 12:23:13.711542 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37bff1f1-8dec-4d2e-994b-757b83902321-kube-api-access-2kzkt" (OuterVolumeSpecName: "kube-api-access-2kzkt") pod "37bff1f1-8dec-4d2e-994b-757b83902321" (UID: "37bff1f1-8dec-4d2e-994b-757b83902321"). InnerVolumeSpecName "kube-api-access-2kzkt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:23:13 crc kubenswrapper[4703]: I0130 12:23:13.752108 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37bff1f1-8dec-4d2e-994b-757b83902321-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "37bff1f1-8dec-4d2e-994b-757b83902321" (UID: "37bff1f1-8dec-4d2e-994b-757b83902321"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:23:13 crc kubenswrapper[4703]: I0130 12:23:13.804916 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37bff1f1-8dec-4d2e-994b-757b83902321-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:13 crc kubenswrapper[4703]: I0130 12:23:13.804980 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2kzkt\" (UniqueName: \"kubernetes.io/projected/37bff1f1-8dec-4d2e-994b-757b83902321-kube-api-access-2kzkt\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:14 crc kubenswrapper[4703]: I0130 12:23:14.235041 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef","Type":"ContainerStarted","Data":"163c6fb9f14a050376cbbacf469f4c8d7803e870094ed88896666efa17df373f"} Jan 30 12:23:14 crc kubenswrapper[4703]: I0130 12:23:14.235100 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef","Type":"ContainerStarted","Data":"02dedaa78642f3180e90f003393dc0b541ff490fa8875c09df6abcb405e4459a"} Jan 30 12:23:14 crc kubenswrapper[4703]: I0130 12:23:14.238182 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nzw46" event={"ID":"37bff1f1-8dec-4d2e-994b-757b83902321","Type":"ContainerDied","Data":"cedbd038604eeb1a038a5b44742e4a6da87ad663e441f6899cc378cc82e8d6af"} Jan 30 12:23:14 crc kubenswrapper[4703]: I0130 12:23:14.238267 4703 scope.go:117] "RemoveContainer" containerID="3fbb404760851f02a2e260bf1a744dc574969285804230188133d5deacd6d7f2" Jan 30 12:23:14 crc kubenswrapper[4703]: I0130 12:23:14.238268 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nzw46" Jan 30 12:23:14 crc kubenswrapper[4703]: I0130 12:23:14.282849 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nzw46"] Jan 30 12:23:14 crc kubenswrapper[4703]: I0130 12:23:14.289072 4703 scope.go:117] "RemoveContainer" containerID="de812ddbc0922c4e21eeeab53b845677aeb899799cb1ef81a47024dab5c1f64d" Jan 30 12:23:14 crc kubenswrapper[4703]: I0130 12:23:14.300391 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-nzw46"] Jan 30 12:23:14 crc kubenswrapper[4703]: I0130 12:23:14.335921 4703 scope.go:117] "RemoveContainer" containerID="7b51e23f8924fb06b51551cc64c14485b831369060adc28fdf51d41e18f8a8d1" Jan 30 12:23:15 crc kubenswrapper[4703]: I0130 12:23:15.101243 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37bff1f1-8dec-4d2e-994b-757b83902321" path="/var/lib/kubelet/pods/37bff1f1-8dec-4d2e-994b-757b83902321/volumes" Jan 30 12:23:17 crc kubenswrapper[4703]: I0130 12:23:17.537527 4703 generic.go:334] "Generic (PLEG): container finished" podID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerID="515f8dda2bc0f9be8dcdb24b41cdf2299f04b0afc161bd211413cc06590da029" exitCode=137 Jan 30 12:23:17 crc kubenswrapper[4703]: I0130 12:23:17.537648 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5f9958979d-8h859" event={"ID":"b888ea51-970d-4f4d-9e5c-f456ca173472","Type":"ContainerDied","Data":"515f8dda2bc0f9be8dcdb24b41cdf2299f04b0afc161bd211413cc06590da029"} Jan 30 12:23:17 crc kubenswrapper[4703]: I0130 12:23:17.551160 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef","Type":"ContainerStarted","Data":"b8982760796eb97198d793f674a5e4cb4266ddbd5e94dfa6477a2fb2a86e62d0"} Jan 30 12:23:17 crc kubenswrapper[4703]: I0130 12:23:17.551558 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 30 12:23:17 crc kubenswrapper[4703]: I0130 12:23:17.592109 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.096437909 podStartE2EDuration="9.592083649s" podCreationTimestamp="2026-01-30 12:23:08 +0000 UTC" firstStartedPulling="2026-01-30 12:23:09.193684798 +0000 UTC m=+1624.971506462" lastFinishedPulling="2026-01-30 12:23:16.689330548 +0000 UTC m=+1632.467152202" observedRunningTime="2026-01-30 12:23:17.58327442 +0000 UTC m=+1633.361096064" watchObservedRunningTime="2026-01-30 12:23:17.592083649 +0000 UTC m=+1633.369905303" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.457521 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.583241 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5f9958979d-8h859" event={"ID":"b888ea51-970d-4f4d-9e5c-f456ca173472","Type":"ContainerDied","Data":"189e26c4eab30971a78885710777ee2e642a2ef52c043040fedd5dfcdddb7433"} Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.583268 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5f9958979d-8h859" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.583671 4703 scope.go:117] "RemoveContainer" containerID="12c6e01d1ec7cf675be0982e70d170f25953b74704982af7f66aaded402ddb28" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.659071 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b888ea51-970d-4f4d-9e5c-f456ca173472-scripts\") pod \"b888ea51-970d-4f4d-9e5c-f456ca173472\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.659175 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b888ea51-970d-4f4d-9e5c-f456ca173472-horizon-secret-key\") pod \"b888ea51-970d-4f4d-9e5c-f456ca173472\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.659219 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b888ea51-970d-4f4d-9e5c-f456ca173472-combined-ca-bundle\") pod \"b888ea51-970d-4f4d-9e5c-f456ca173472\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.659421 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bpxgw\" (UniqueName: \"kubernetes.io/projected/b888ea51-970d-4f4d-9e5c-f456ca173472-kube-api-access-bpxgw\") pod \"b888ea51-970d-4f4d-9e5c-f456ca173472\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.659504 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b888ea51-970d-4f4d-9e5c-f456ca173472-logs\") pod \"b888ea51-970d-4f4d-9e5c-f456ca173472\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.659531 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b888ea51-970d-4f4d-9e5c-f456ca173472-horizon-tls-certs\") pod \"b888ea51-970d-4f4d-9e5c-f456ca173472\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.659614 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b888ea51-970d-4f4d-9e5c-f456ca173472-config-data\") pod \"b888ea51-970d-4f4d-9e5c-f456ca173472\" (UID: \"b888ea51-970d-4f4d-9e5c-f456ca173472\") " Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.670898 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b888ea51-970d-4f4d-9e5c-f456ca173472-logs" (OuterVolumeSpecName: "logs") pod "b888ea51-970d-4f4d-9e5c-f456ca173472" (UID: "b888ea51-970d-4f4d-9e5c-f456ca173472"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.675784 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b888ea51-970d-4f4d-9e5c-f456ca173472-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "b888ea51-970d-4f4d-9e5c-f456ca173472" (UID: "b888ea51-970d-4f4d-9e5c-f456ca173472"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.691433 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b888ea51-970d-4f4d-9e5c-f456ca173472-kube-api-access-bpxgw" (OuterVolumeSpecName: "kube-api-access-bpxgw") pod "b888ea51-970d-4f4d-9e5c-f456ca173472" (UID: "b888ea51-970d-4f4d-9e5c-f456ca173472"). InnerVolumeSpecName "kube-api-access-bpxgw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.716484 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b888ea51-970d-4f4d-9e5c-f456ca173472-config-data" (OuterVolumeSpecName: "config-data") pod "b888ea51-970d-4f4d-9e5c-f456ca173472" (UID: "b888ea51-970d-4f4d-9e5c-f456ca173472"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.719498 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b888ea51-970d-4f4d-9e5c-f456ca173472-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b888ea51-970d-4f4d-9e5c-f456ca173472" (UID: "b888ea51-970d-4f4d-9e5c-f456ca173472"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.740509 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b888ea51-970d-4f4d-9e5c-f456ca173472-scripts" (OuterVolumeSpecName: "scripts") pod "b888ea51-970d-4f4d-9e5c-f456ca173472" (UID: "b888ea51-970d-4f4d-9e5c-f456ca173472"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.770015 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b888ea51-970d-4f4d-9e5c-f456ca173472-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.770052 4703 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b888ea51-970d-4f4d-9e5c-f456ca173472-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.770064 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b888ea51-970d-4f4d-9e5c-f456ca173472-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.770081 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bpxgw\" (UniqueName: \"kubernetes.io/projected/b888ea51-970d-4f4d-9e5c-f456ca173472-kube-api-access-bpxgw\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.770093 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b888ea51-970d-4f4d-9e5c-f456ca173472-logs\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.770103 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b888ea51-970d-4f4d-9e5c-f456ca173472-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.782665 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/b888ea51-970d-4f4d-9e5c-f456ca173472-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "b888ea51-970d-4f4d-9e5c-f456ca173472" (UID: "b888ea51-970d-4f4d-9e5c-f456ca173472"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.853898 4703 scope.go:117] "RemoveContainer" containerID="515f8dda2bc0f9be8dcdb24b41cdf2299f04b0afc161bd211413cc06590da029" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.872759 4703 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/b888ea51-970d-4f4d-9e5c-f456ca173472-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.932710 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5f9958979d-8h859"] Jan 30 12:23:18 crc kubenswrapper[4703]: I0130 12:23:18.943989 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5f9958979d-8h859"] Jan 30 12:23:19 crc kubenswrapper[4703]: I0130 12:23:19.100664 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" path="/var/lib/kubelet/pods/b888ea51-970d-4f4d-9e5c-f456ca173472/volumes" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.213513 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.800337 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-d9t7g"] Jan 30 12:23:21 crc kubenswrapper[4703]: E0130 12:23:21.801271 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.801294 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" Jan 30 12:23:21 crc kubenswrapper[4703]: E0130 12:23:21.801306 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.801313 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" Jan 30 12:23:21 crc kubenswrapper[4703]: E0130 12:23:21.801321 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37bff1f1-8dec-4d2e-994b-757b83902321" containerName="extract-utilities" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.801328 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="37bff1f1-8dec-4d2e-994b-757b83902321" containerName="extract-utilities" Jan 30 12:23:21 crc kubenswrapper[4703]: E0130 12:23:21.801351 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37bff1f1-8dec-4d2e-994b-757b83902321" containerName="registry-server" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.801357 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="37bff1f1-8dec-4d2e-994b-757b83902321" containerName="registry-server" Jan 30 12:23:21 crc kubenswrapper[4703]: E0130 12:23:21.801374 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon-log" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.801380 4703 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon-log" Jan 30 12:23:21 crc kubenswrapper[4703]: E0130 12:23:21.801391 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37bff1f1-8dec-4d2e-994b-757b83902321" containerName="extract-content" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.801398 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="37bff1f1-8dec-4d2e-994b-757b83902321" containerName="extract-content" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.801595 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.801611 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="37bff1f1-8dec-4d2e-994b-757b83902321" containerName="registry-server" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.801623 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon-log" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.801634 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.804086 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-d9t7g" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.807147 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.811257 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.816660 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-d9t7g"] Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.895650 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93b18574-a5a2-4317-b40d-eb021590ac96-scripts\") pod \"nova-cell0-cell-mapping-d9t7g\" (UID: \"93b18574-a5a2-4317-b40d-eb021590ac96\") " pod="openstack/nova-cell0-cell-mapping-d9t7g" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.895874 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmt2j\" (UniqueName: \"kubernetes.io/projected/93b18574-a5a2-4317-b40d-eb021590ac96-kube-api-access-cmt2j\") pod \"nova-cell0-cell-mapping-d9t7g\" (UID: \"93b18574-a5a2-4317-b40d-eb021590ac96\") " pod="openstack/nova-cell0-cell-mapping-d9t7g" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.896007 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93b18574-a5a2-4317-b40d-eb021590ac96-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-d9t7g\" (UID: \"93b18574-a5a2-4317-b40d-eb021590ac96\") " pod="openstack/nova-cell0-cell-mapping-d9t7g" Jan 30 12:23:21 crc kubenswrapper[4703]: I0130 12:23:21.896305 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93b18574-a5a2-4317-b40d-eb021590ac96-config-data\") pod \"nova-cell0-cell-mapping-d9t7g\" (UID: \"93b18574-a5a2-4317-b40d-eb021590ac96\") " 
pod="openstack/nova-cell0-cell-mapping-d9t7g" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:21.999245 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmt2j\" (UniqueName: \"kubernetes.io/projected/93b18574-a5a2-4317-b40d-eb021590ac96-kube-api-access-cmt2j\") pod \"nova-cell0-cell-mapping-d9t7g\" (UID: \"93b18574-a5a2-4317-b40d-eb021590ac96\") " pod="openstack/nova-cell0-cell-mapping-d9t7g" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:21.999342 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93b18574-a5a2-4317-b40d-eb021590ac96-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-d9t7g\" (UID: \"93b18574-a5a2-4317-b40d-eb021590ac96\") " pod="openstack/nova-cell0-cell-mapping-d9t7g" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:21.999486 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93b18574-a5a2-4317-b40d-eb021590ac96-config-data\") pod \"nova-cell0-cell-mapping-d9t7g\" (UID: \"93b18574-a5a2-4317-b40d-eb021590ac96\") " pod="openstack/nova-cell0-cell-mapping-d9t7g" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:21.999613 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93b18574-a5a2-4317-b40d-eb021590ac96-scripts\") pod \"nova-cell0-cell-mapping-d9t7g\" (UID: \"93b18574-a5a2-4317-b40d-eb021590ac96\") " pod="openstack/nova-cell0-cell-mapping-d9t7g" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.009421 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93b18574-a5a2-4317-b40d-eb021590ac96-scripts\") pod \"nova-cell0-cell-mapping-d9t7g\" (UID: \"93b18574-a5a2-4317-b40d-eb021590ac96\") " pod="openstack/nova-cell0-cell-mapping-d9t7g" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.018056 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93b18574-a5a2-4317-b40d-eb021590ac96-config-data\") pod \"nova-cell0-cell-mapping-d9t7g\" (UID: \"93b18574-a5a2-4317-b40d-eb021590ac96\") " pod="openstack/nova-cell0-cell-mapping-d9t7g" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.022947 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93b18574-a5a2-4317-b40d-eb021590ac96-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-d9t7g\" (UID: \"93b18574-a5a2-4317-b40d-eb021590ac96\") " pod="openstack/nova-cell0-cell-mapping-d9t7g" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.032495 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmt2j\" (UniqueName: \"kubernetes.io/projected/93b18574-a5a2-4317-b40d-eb021590ac96-kube-api-access-cmt2j\") pod \"nova-cell0-cell-mapping-d9t7g\" (UID: \"93b18574-a5a2-4317-b40d-eb021590ac96\") " pod="openstack/nova-cell0-cell-mapping-d9t7g" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.091169 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 30 12:23:22 crc kubenswrapper[4703]: E0130 12:23:22.091739 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.091766 4703 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.092058 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="b888ea51-970d-4f4d-9e5c-f456ca173472" containerName="horizon" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.107155 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.113821 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.119263 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.128249 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-d9t7g" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.206691 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rdbxn\" (UniqueName: \"kubernetes.io/projected/391692f9-2c2e-48fc-8dda-74311b39393e-kube-api-access-rdbxn\") pod \"nova-api-0\" (UID: \"391692f9-2c2e-48fc-8dda-74311b39393e\") " pod="openstack/nova-api-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.207712 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/391692f9-2c2e-48fc-8dda-74311b39393e-config-data\") pod \"nova-api-0\" (UID: \"391692f9-2c2e-48fc-8dda-74311b39393e\") " pod="openstack/nova-api-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.207823 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/391692f9-2c2e-48fc-8dda-74311b39393e-logs\") pod \"nova-api-0\" (UID: \"391692f9-2c2e-48fc-8dda-74311b39393e\") " pod="openstack/nova-api-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.207982 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/391692f9-2c2e-48fc-8dda-74311b39393e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"391692f9-2c2e-48fc-8dda-74311b39393e\") " pod="openstack/nova-api-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.228225 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.230408 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.243067 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.309793 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/391692f9-2c2e-48fc-8dda-74311b39393e-config-data\") pod \"nova-api-0\" (UID: \"391692f9-2c2e-48fc-8dda-74311b39393e\") " pod="openstack/nova-api-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.309856 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/391692f9-2c2e-48fc-8dda-74311b39393e-logs\") pod \"nova-api-0\" (UID: \"391692f9-2c2e-48fc-8dda-74311b39393e\") " pod="openstack/nova-api-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.309928 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/391692f9-2c2e-48fc-8dda-74311b39393e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"391692f9-2c2e-48fc-8dda-74311b39393e\") " pod="openstack/nova-api-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.310016 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdbxn\" (UniqueName: \"kubernetes.io/projected/391692f9-2c2e-48fc-8dda-74311b39393e-kube-api-access-rdbxn\") pod \"nova-api-0\" (UID: \"391692f9-2c2e-48fc-8dda-74311b39393e\") " pod="openstack/nova-api-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.313186 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/391692f9-2c2e-48fc-8dda-74311b39393e-logs\") pod \"nova-api-0\" (UID: \"391692f9-2c2e-48fc-8dda-74311b39393e\") " pod="openstack/nova-api-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.320959 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/391692f9-2c2e-48fc-8dda-74311b39393e-config-data\") pod \"nova-api-0\" (UID: \"391692f9-2c2e-48fc-8dda-74311b39393e\") " pod="openstack/nova-api-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.324029 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/391692f9-2c2e-48fc-8dda-74311b39393e-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"391692f9-2c2e-48fc-8dda-74311b39393e\") " pod="openstack/nova-api-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.329709 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.365214 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdbxn\" (UniqueName: \"kubernetes.io/projected/391692f9-2c2e-48fc-8dda-74311b39393e-kube-api-access-rdbxn\") pod \"nova-api-0\" (UID: \"391692f9-2c2e-48fc-8dda-74311b39393e\") " pod="openstack/nova-api-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.389484 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.391504 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.403799 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.411962 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9321c04-45e3-40ed-9d9d-bd6b1221c265-config-data\") pod \"nova-scheduler-0\" (UID: \"e9321c04-45e3-40ed-9d9d-bd6b1221c265\") " pod="openstack/nova-scheduler-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.412379 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnmhm\" (UniqueName: \"kubernetes.io/projected/e9321c04-45e3-40ed-9d9d-bd6b1221c265-kube-api-access-pnmhm\") pod \"nova-scheduler-0\" (UID: \"e9321c04-45e3-40ed-9d9d-bd6b1221c265\") " pod="openstack/nova-scheduler-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.412638 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9321c04-45e3-40ed-9d9d-bd6b1221c265-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e9321c04-45e3-40ed-9d9d-bd6b1221c265\") " pod="openstack/nova-scheduler-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.435216 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.495914 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.498564 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.508858 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.522944 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9321c04-45e3-40ed-9d9d-bd6b1221c265-config-data\") pod \"nova-scheduler-0\" (UID: \"e9321c04-45e3-40ed-9d9d-bd6b1221c265\") " pod="openstack/nova-scheduler-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.523069 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/355a9e7b-78db-4e03-997d-acc6136442e4-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"355a9e7b-78db-4e03-997d-acc6136442e4\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.523340 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/355a9e7b-78db-4e03-997d-acc6136442e4-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"355a9e7b-78db-4e03-997d-acc6136442e4\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.523373 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnnz6\" (UniqueName: \"kubernetes.io/projected/355a9e7b-78db-4e03-997d-acc6136442e4-kube-api-access-fnnz6\") pod \"nova-cell1-novncproxy-0\" (UID: \"355a9e7b-78db-4e03-997d-acc6136442e4\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.523395 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnmhm\" (UniqueName: \"kubernetes.io/projected/e9321c04-45e3-40ed-9d9d-bd6b1221c265-kube-api-access-pnmhm\") pod \"nova-scheduler-0\" (UID: \"e9321c04-45e3-40ed-9d9d-bd6b1221c265\") " pod="openstack/nova-scheduler-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.523619 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9321c04-45e3-40ed-9d9d-bd6b1221c265-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e9321c04-45e3-40ed-9d9d-bd6b1221c265\") " pod="openstack/nova-scheduler-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.537096 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.556803 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9321c04-45e3-40ed-9d9d-bd6b1221c265-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e9321c04-45e3-40ed-9d9d-bd6b1221c265\") " pod="openstack/nova-scheduler-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.561831 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9321c04-45e3-40ed-9d9d-bd6b1221c265-config-data\") pod \"nova-scheduler-0\" (UID: \"e9321c04-45e3-40ed-9d9d-bd6b1221c265\") " pod="openstack/nova-scheduler-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.592054 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnmhm\" (UniqueName: \"kubernetes.io/projected/e9321c04-45e3-40ed-9d9d-bd6b1221c265-kube-api-access-pnmhm\") pod \"nova-scheduler-0\" (UID: \"e9321c04-45e3-40ed-9d9d-bd6b1221c265\") " pod="openstack/nova-scheduler-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.603629 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.632208 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/227c7e91-67c4-46e3-b16d-ad10b3029e81-config-data\") pod \"nova-metadata-0\" (UID: \"227c7e91-67c4-46e3-b16d-ad10b3029e81\") " pod="openstack/nova-metadata-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.632560 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/227c7e91-67c4-46e3-b16d-ad10b3029e81-logs\") pod \"nova-metadata-0\" (UID: \"227c7e91-67c4-46e3-b16d-ad10b3029e81\") " pod="openstack/nova-metadata-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.632633 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/355a9e7b-78db-4e03-997d-acc6136442e4-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"355a9e7b-78db-4e03-997d-acc6136442e4\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.632919 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hp5c\" (UniqueName: \"kubernetes.io/projected/227c7e91-67c4-46e3-b16d-ad10b3029e81-kube-api-access-5hp5c\") pod \"nova-metadata-0\" (UID: \"227c7e91-67c4-46e3-b16d-ad10b3029e81\") " pod="openstack/nova-metadata-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.632984 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/355a9e7b-78db-4e03-997d-acc6136442e4-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"355a9e7b-78db-4e03-997d-acc6136442e4\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.633027 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnnz6\" (UniqueName: \"kubernetes.io/projected/355a9e7b-78db-4e03-997d-acc6136442e4-kube-api-access-fnnz6\") pod \"nova-cell1-novncproxy-0\" (UID: \"355a9e7b-78db-4e03-997d-acc6136442e4\") " 
pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.634068 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-cccmv"] Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.638455 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/355a9e7b-78db-4e03-997d-acc6136442e4-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"355a9e7b-78db-4e03-997d-acc6136442e4\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.638734 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/227c7e91-67c4-46e3-b16d-ad10b3029e81-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"227c7e91-67c4-46e3-b16d-ad10b3029e81\") " pod="openstack/nova-metadata-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.639573 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.651406 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/355a9e7b-78db-4e03-997d-acc6136442e4-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"355a9e7b-78db-4e03-997d-acc6136442e4\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.668515 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnnz6\" (UniqueName: \"kubernetes.io/projected/355a9e7b-78db-4e03-997d-acc6136442e4-kube-api-access-fnnz6\") pod \"nova-cell1-novncproxy-0\" (UID: \"355a9e7b-78db-4e03-997d-acc6136442e4\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.668680 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-cccmv"] Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.694194 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.741264 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fkbn\" (UniqueName: \"kubernetes.io/projected/c8357d8b-c1e1-4411-92c9-4de33313aaeb-kube-api-access-5fkbn\") pod \"dnsmasq-dns-757b4f8459-cccmv\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.741368 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-config\") pod \"dnsmasq-dns-757b4f8459-cccmv\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.741429 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hp5c\" (UniqueName: \"kubernetes.io/projected/227c7e91-67c4-46e3-b16d-ad10b3029e81-kube-api-access-5hp5c\") pod \"nova-metadata-0\" (UID: \"227c7e91-67c4-46e3-b16d-ad10b3029e81\") " pod="openstack/nova-metadata-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.741467 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-cccmv\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.741519 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-dns-svc\") pod \"dnsmasq-dns-757b4f8459-cccmv\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.741558 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-cccmv\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.741617 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/227c7e91-67c4-46e3-b16d-ad10b3029e81-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"227c7e91-67c4-46e3-b16d-ad10b3029e81\") " pod="openstack/nova-metadata-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.741675 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/227c7e91-67c4-46e3-b16d-ad10b3029e81-config-data\") pod \"nova-metadata-0\" (UID: \"227c7e91-67c4-46e3-b16d-ad10b3029e81\") " pod="openstack/nova-metadata-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.741723 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/227c7e91-67c4-46e3-b16d-ad10b3029e81-logs\") pod \"nova-metadata-0\" (UID: \"227c7e91-67c4-46e3-b16d-ad10b3029e81\") " pod="openstack/nova-metadata-0" Jan 30 12:23:22 crc 
kubenswrapper[4703]: I0130 12:23:22.741784 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-cccmv\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.754698 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/227c7e91-67c4-46e3-b16d-ad10b3029e81-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"227c7e91-67c4-46e3-b16d-ad10b3029e81\") " pod="openstack/nova-metadata-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.755036 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/227c7e91-67c4-46e3-b16d-ad10b3029e81-logs\") pod \"nova-metadata-0\" (UID: \"227c7e91-67c4-46e3-b16d-ad10b3029e81\") " pod="openstack/nova-metadata-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.758970 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/227c7e91-67c4-46e3-b16d-ad10b3029e81-config-data\") pod \"nova-metadata-0\" (UID: \"227c7e91-67c4-46e3-b16d-ad10b3029e81\") " pod="openstack/nova-metadata-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.766499 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.769848 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hp5c\" (UniqueName: \"kubernetes.io/projected/227c7e91-67c4-46e3-b16d-ad10b3029e81-kube-api-access-5hp5c\") pod \"nova-metadata-0\" (UID: \"227c7e91-67c4-46e3-b16d-ad10b3029e81\") " pod="openstack/nova-metadata-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.852581 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-dns-svc\") pod \"dnsmasq-dns-757b4f8459-cccmv\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.843525 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-dns-svc\") pod \"dnsmasq-dns-757b4f8459-cccmv\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.853471 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-cccmv\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.854313 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-cccmv\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 
12:23:22.854483 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fkbn\" (UniqueName: \"kubernetes.io/projected/c8357d8b-c1e1-4411-92c9-4de33313aaeb-kube-api-access-5fkbn\") pod \"dnsmasq-dns-757b4f8459-cccmv\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.854552 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-config\") pod \"dnsmasq-dns-757b4f8459-cccmv\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.854719 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-cccmv\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.856018 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-ovsdbserver-sb\") pod \"dnsmasq-dns-757b4f8459-cccmv\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.856724 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-dns-swift-storage-0\") pod \"dnsmasq-dns-757b4f8459-cccmv\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.857462 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-ovsdbserver-nb\") pod \"dnsmasq-dns-757b4f8459-cccmv\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.863218 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-config\") pod \"dnsmasq-dns-757b4f8459-cccmv\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.882217 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 30 12:23:22 crc kubenswrapper[4703]: I0130 12:23:22.923494 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fkbn\" (UniqueName: \"kubernetes.io/projected/c8357d8b-c1e1-4411-92c9-4de33313aaeb-kube-api-access-5fkbn\") pod \"dnsmasq-dns-757b4f8459-cccmv\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:23 crc kubenswrapper[4703]: I0130 12:23:23.005497 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:23 crc kubenswrapper[4703]: I0130 12:23:23.163280 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-d9t7g"] Jan 30 12:23:23 crc kubenswrapper[4703]: I0130 12:23:23.570359 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 30 12:23:23 crc kubenswrapper[4703]: W0130 12:23:23.632300 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode9321c04_45e3_40ed_9d9d_bd6b1221c265.slice/crio-bdd52643fe89ee144f7b0a5b3d317edb59be2df3abdc96d1ce35f368e61f3a38 WatchSource:0}: Error finding container bdd52643fe89ee144f7b0a5b3d317edb59be2df3abdc96d1ce35f368e61f3a38: Status 404 returned error can't find the container with id bdd52643fe89ee144f7b0a5b3d317edb59be2df3abdc96d1ce35f368e61f3a38 Jan 30 12:23:23 crc kubenswrapper[4703]: I0130 12:23:23.666186 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 30 12:23:23 crc kubenswrapper[4703]: I0130 12:23:23.781405 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e9321c04-45e3-40ed-9d9d-bd6b1221c265","Type":"ContainerStarted","Data":"bdd52643fe89ee144f7b0a5b3d317edb59be2df3abdc96d1ce35f368e61f3a38"} Jan 30 12:23:23 crc kubenswrapper[4703]: I0130 12:23:23.802430 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-d9t7g" event={"ID":"93b18574-a5a2-4317-b40d-eb021590ac96","Type":"ContainerStarted","Data":"4fa3439ac44c3c03ea1c5bc34a09423b78a19d73aee17731c80fda97a36c4599"} Jan 30 12:23:23 crc kubenswrapper[4703]: I0130 12:23:23.831293 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"391692f9-2c2e-48fc-8dda-74311b39393e","Type":"ContainerStarted","Data":"98d7dd5cddd6a8504e3443492b1c17d866c7fb686b0269fdafe2ed9e3aa35ca5"} Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.017860 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.174254 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-wbn8k"] Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.176102 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-wbn8k" Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.181576 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.181645 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.209613 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-wbn8k"] Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.311478 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40732e3a-846a-44ee-bf77-7c90e541af74-config-data\") pod \"nova-cell1-conductor-db-sync-wbn8k\" (UID: \"40732e3a-846a-44ee-bf77-7c90e541af74\") " pod="openstack/nova-cell1-conductor-db-sync-wbn8k" Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.311534 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40732e3a-846a-44ee-bf77-7c90e541af74-scripts\") pod \"nova-cell1-conductor-db-sync-wbn8k\" (UID: \"40732e3a-846a-44ee-bf77-7c90e541af74\") " pod="openstack/nova-cell1-conductor-db-sync-wbn8k" Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.311608 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40732e3a-846a-44ee-bf77-7c90e541af74-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-wbn8k\" (UID: \"40732e3a-846a-44ee-bf77-7c90e541af74\") " pod="openstack/nova-cell1-conductor-db-sync-wbn8k" Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.311961 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rspp\" (UniqueName: \"kubernetes.io/projected/40732e3a-846a-44ee-bf77-7c90e541af74-kube-api-access-8rspp\") pod \"nova-cell1-conductor-db-sync-wbn8k\" (UID: \"40732e3a-846a-44ee-bf77-7c90e541af74\") " pod="openstack/nova-cell1-conductor-db-sync-wbn8k" Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.395556 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-cccmv"] Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.418701 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40732e3a-846a-44ee-bf77-7c90e541af74-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-wbn8k\" (UID: \"40732e3a-846a-44ee-bf77-7c90e541af74\") " pod="openstack/nova-cell1-conductor-db-sync-wbn8k" Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.418830 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rspp\" (UniqueName: \"kubernetes.io/projected/40732e3a-846a-44ee-bf77-7c90e541af74-kube-api-access-8rspp\") pod \"nova-cell1-conductor-db-sync-wbn8k\" (UID: \"40732e3a-846a-44ee-bf77-7c90e541af74\") " pod="openstack/nova-cell1-conductor-db-sync-wbn8k" Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.418999 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40732e3a-846a-44ee-bf77-7c90e541af74-config-data\") pod \"nova-cell1-conductor-db-sync-wbn8k\" (UID: 
\"40732e3a-846a-44ee-bf77-7c90e541af74\") " pod="openstack/nova-cell1-conductor-db-sync-wbn8k" Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.419029 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40732e3a-846a-44ee-bf77-7c90e541af74-scripts\") pod \"nova-cell1-conductor-db-sync-wbn8k\" (UID: \"40732e3a-846a-44ee-bf77-7c90e541af74\") " pod="openstack/nova-cell1-conductor-db-sync-wbn8k" Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.431976 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40732e3a-846a-44ee-bf77-7c90e541af74-scripts\") pod \"nova-cell1-conductor-db-sync-wbn8k\" (UID: \"40732e3a-846a-44ee-bf77-7c90e541af74\") " pod="openstack/nova-cell1-conductor-db-sync-wbn8k" Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.438313 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40732e3a-846a-44ee-bf77-7c90e541af74-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-wbn8k\" (UID: \"40732e3a-846a-44ee-bf77-7c90e541af74\") " pod="openstack/nova-cell1-conductor-db-sync-wbn8k" Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.470919 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rspp\" (UniqueName: \"kubernetes.io/projected/40732e3a-846a-44ee-bf77-7c90e541af74-kube-api-access-8rspp\") pod \"nova-cell1-conductor-db-sync-wbn8k\" (UID: \"40732e3a-846a-44ee-bf77-7c90e541af74\") " pod="openstack/nova-cell1-conductor-db-sync-wbn8k" Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.472165 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40732e3a-846a-44ee-bf77-7c90e541af74-config-data\") pod \"nova-cell1-conductor-db-sync-wbn8k\" (UID: \"40732e3a-846a-44ee-bf77-7c90e541af74\") " pod="openstack/nova-cell1-conductor-db-sync-wbn8k" Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.536968 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-wbn8k" Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.654544 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 30 12:23:24 crc kubenswrapper[4703]: W0130 12:23:24.683242 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod227c7e91_67c4_46e3_b16d_ad10b3029e81.slice/crio-7a8c38ece3f7cf7207d9008363e12fea8c0d714d28828642cfa3e50d145618eb WatchSource:0}: Error finding container 7a8c38ece3f7cf7207d9008363e12fea8c0d714d28828642cfa3e50d145618eb: Status 404 returned error can't find the container with id 7a8c38ece3f7cf7207d9008363e12fea8c0d714d28828642cfa3e50d145618eb Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.880288 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"227c7e91-67c4-46e3-b16d-ad10b3029e81","Type":"ContainerStarted","Data":"7a8c38ece3f7cf7207d9008363e12fea8c0d714d28828642cfa3e50d145618eb"} Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.886289 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-d9t7g" event={"ID":"93b18574-a5a2-4317-b40d-eb021590ac96","Type":"ContainerStarted","Data":"f88fad3c745401aea84ad3bfafd365ae7f8cc354c2b585857b1eb141f5688b84"} Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.902482 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"355a9e7b-78db-4e03-997d-acc6136442e4","Type":"ContainerStarted","Data":"f6accd44d0334349422265782d60472161e632f1089b0c43543c13f72e2ffd2f"} Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.925740 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-cccmv" event={"ID":"c8357d8b-c1e1-4411-92c9-4de33313aaeb","Type":"ContainerStarted","Data":"7776806fdae7c2480f7db42443abf5deec35b5bf8601c499efad9a1a1f1a81d9"} Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.925812 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-cccmv" event={"ID":"c8357d8b-c1e1-4411-92c9-4de33313aaeb","Type":"ContainerStarted","Data":"cf4350ffd91ce97eebc3bc0e19eb6d981e5793f46110d009ffc7048ff6d4ae7b"} Jan 30 12:23:24 crc kubenswrapper[4703]: I0130 12:23:24.984922 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-d9t7g" podStartSLOduration=3.984883472 podStartE2EDuration="3.984883472s" podCreationTimestamp="2026-01-30 12:23:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:23:24.917453226 +0000 UTC m=+1640.695274880" watchObservedRunningTime="2026-01-30 12:23:24.984883472 +0000 UTC m=+1640.762705126" Jan 30 12:23:25 crc kubenswrapper[4703]: W0130 12:23:25.311313 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod40732e3a_846a_44ee_bf77_7c90e541af74.slice/crio-50cd798d7b56ed2da2e633bf1b6584d0edecebb6e87c1195e3f8f5dec94b9756 WatchSource:0}: Error finding container 50cd798d7b56ed2da2e633bf1b6584d0edecebb6e87c1195e3f8f5dec94b9756: Status 404 returned error can't find the container with id 50cd798d7b56ed2da2e633bf1b6584d0edecebb6e87c1195e3f8f5dec94b9756 Jan 30 12:23:25 crc kubenswrapper[4703]: I0130 12:23:25.342291 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/nova-cell1-conductor-db-sync-wbn8k"] Jan 30 12:23:25 crc kubenswrapper[4703]: I0130 12:23:25.982538 4703 generic.go:334] "Generic (PLEG): container finished" podID="c8357d8b-c1e1-4411-92c9-4de33313aaeb" containerID="7776806fdae7c2480f7db42443abf5deec35b5bf8601c499efad9a1a1f1a81d9" exitCode=0 Jan 30 12:23:25 crc kubenswrapper[4703]: I0130 12:23:25.983204 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-cccmv" event={"ID":"c8357d8b-c1e1-4411-92c9-4de33313aaeb","Type":"ContainerDied","Data":"7776806fdae7c2480f7db42443abf5deec35b5bf8601c499efad9a1a1f1a81d9"} Jan 30 12:23:25 crc kubenswrapper[4703]: I0130 12:23:25.983256 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-cccmv" event={"ID":"c8357d8b-c1e1-4411-92c9-4de33313aaeb","Type":"ContainerStarted","Data":"f96b4388e40767582588b210e283c06412984bc5aaac06d7107321a9a84ef6d7"} Jan 30 12:23:25 crc kubenswrapper[4703]: I0130 12:23:25.984922 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:26 crc kubenswrapper[4703]: I0130 12:23:26.008558 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-wbn8k" event={"ID":"40732e3a-846a-44ee-bf77-7c90e541af74","Type":"ContainerStarted","Data":"f07407a994b4eb22db96cc6f20185276cc4464eedc124843068a40cc0bc0699b"} Jan 30 12:23:26 crc kubenswrapper[4703]: I0130 12:23:26.008624 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-wbn8k" event={"ID":"40732e3a-846a-44ee-bf77-7c90e541af74","Type":"ContainerStarted","Data":"50cd798d7b56ed2da2e633bf1b6584d0edecebb6e87c1195e3f8f5dec94b9756"} Jan 30 12:23:26 crc kubenswrapper[4703]: I0130 12:23:26.069934 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-757b4f8459-cccmv" podStartSLOduration=4.069901319 podStartE2EDuration="4.069901319s" podCreationTimestamp="2026-01-30 12:23:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:23:26.066380975 +0000 UTC m=+1641.844202649" watchObservedRunningTime="2026-01-30 12:23:26.069901319 +0000 UTC m=+1641.847722973" Jan 30 12:23:26 crc kubenswrapper[4703]: I0130 12:23:26.143615 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-wbn8k" podStartSLOduration=2.143586984 podStartE2EDuration="2.143586984s" podCreationTimestamp="2026-01-30 12:23:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:23:26.142519365 +0000 UTC m=+1641.920341019" watchObservedRunningTime="2026-01-30 12:23:26.143586984 +0000 UTC m=+1641.921408638" Jan 30 12:23:26 crc kubenswrapper[4703]: I0130 12:23:26.879604 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 30 12:23:26 crc kubenswrapper[4703]: I0130 12:23:26.906062 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 30 12:23:30 crc kubenswrapper[4703]: I0130 12:23:30.078188 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"391692f9-2c2e-48fc-8dda-74311b39393e","Type":"ContainerStarted","Data":"3e4358d211f700b275342fafeb5a4e3fd696724e2f5dffd3f16d085af9de3db0"} Jan 30 12:23:30 crc kubenswrapper[4703]: 
I0130 12:23:30.081013 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"391692f9-2c2e-48fc-8dda-74311b39393e","Type":"ContainerStarted","Data":"45c91da213234652ac10d900a6d444f01c737376a70a5dab1fb2929789e7ce41"} Jan 30 12:23:30 crc kubenswrapper[4703]: I0130 12:23:30.090659 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"355a9e7b-78db-4e03-997d-acc6136442e4","Type":"ContainerStarted","Data":"731db6fb3fa3680e6132c5dedbf118730f4151cdc31bf496748e70392e0796ef"} Jan 30 12:23:30 crc kubenswrapper[4703]: I0130 12:23:30.090871 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="355a9e7b-78db-4e03-997d-acc6136442e4" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://731db6fb3fa3680e6132c5dedbf118730f4151cdc31bf496748e70392e0796ef" gracePeriod=30 Jan 30 12:23:30 crc kubenswrapper[4703]: I0130 12:23:30.095269 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e9321c04-45e3-40ed-9d9d-bd6b1221c265","Type":"ContainerStarted","Data":"be52965fa8e60a20c1c103d8fa852cd9477fa5939310e76616a4850e0e651e53"} Jan 30 12:23:30 crc kubenswrapper[4703]: I0130 12:23:30.112079 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.4684281820000002 podStartE2EDuration="8.112055639s" podCreationTimestamp="2026-01-30 12:23:22 +0000 UTC" firstStartedPulling="2026-01-30 12:23:23.6880285 +0000 UTC m=+1639.465850154" lastFinishedPulling="2026-01-30 12:23:29.331655957 +0000 UTC m=+1645.109477611" observedRunningTime="2026-01-30 12:23:30.110720904 +0000 UTC m=+1645.888542558" watchObservedRunningTime="2026-01-30 12:23:30.112055639 +0000 UTC m=+1645.889877293" Jan 30 12:23:30 crc kubenswrapper[4703]: I0130 12:23:30.115812 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"227c7e91-67c4-46e3-b16d-ad10b3029e81","Type":"ContainerStarted","Data":"0977eb8443229ca360b311bce7bbf4d96207dc6d8821eea6cb4a4d6bdb4c7093"} Jan 30 12:23:30 crc kubenswrapper[4703]: I0130 12:23:30.115871 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"227c7e91-67c4-46e3-b16d-ad10b3029e81","Type":"ContainerStarted","Data":"92850b060bbf8ffb73d47e793e6e821342ee89797c0fe93ddd7a38294997fc9c"} Jan 30 12:23:30 crc kubenswrapper[4703]: I0130 12:23:30.115995 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="227c7e91-67c4-46e3-b16d-ad10b3029e81" containerName="nova-metadata-log" containerID="cri-o://92850b060bbf8ffb73d47e793e6e821342ee89797c0fe93ddd7a38294997fc9c" gracePeriod=30 Jan 30 12:23:30 crc kubenswrapper[4703]: I0130 12:23:30.116021 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="227c7e91-67c4-46e3-b16d-ad10b3029e81" containerName="nova-metadata-metadata" containerID="cri-o://0977eb8443229ca360b311bce7bbf4d96207dc6d8821eea6cb4a4d6bdb4c7093" gracePeriod=30 Jan 30 12:23:30 crc kubenswrapper[4703]: I0130 12:23:30.134740 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.510522592 podStartE2EDuration="8.134717047s" podCreationTimestamp="2026-01-30 12:23:22 +0000 UTC" firstStartedPulling="2026-01-30 12:23:23.673608104 +0000 UTC m=+1639.451429758" 
lastFinishedPulling="2026-01-30 12:23:29.297802559 +0000 UTC m=+1645.075624213" observedRunningTime="2026-01-30 12:23:30.131789218 +0000 UTC m=+1645.909610892" watchObservedRunningTime="2026-01-30 12:23:30.134717047 +0000 UTC m=+1645.912538701" Jan 30 12:23:30 crc kubenswrapper[4703]: I0130 12:23:30.163008 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.9502395740000003 podStartE2EDuration="8.162977664s" podCreationTimestamp="2026-01-30 12:23:22 +0000 UTC" firstStartedPulling="2026-01-30 12:23:24.04663792 +0000 UTC m=+1639.824459584" lastFinishedPulling="2026-01-30 12:23:29.25937602 +0000 UTC m=+1645.037197674" observedRunningTime="2026-01-30 12:23:30.160151968 +0000 UTC m=+1645.937973622" watchObservedRunningTime="2026-01-30 12:23:30.162977664 +0000 UTC m=+1645.940799318" Jan 30 12:23:30 crc kubenswrapper[4703]: I0130 12:23:30.208259 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.5888739579999998 podStartE2EDuration="8.208240177s" podCreationTimestamp="2026-01-30 12:23:22 +0000 UTC" firstStartedPulling="2026-01-30 12:23:24.687410741 +0000 UTC m=+1640.465232395" lastFinishedPulling="2026-01-30 12:23:29.30677696 +0000 UTC m=+1645.084598614" observedRunningTime="2026-01-30 12:23:30.190910932 +0000 UTC m=+1645.968732586" watchObservedRunningTime="2026-01-30 12:23:30.208240177 +0000 UTC m=+1645.986061831" Jan 30 12:23:31 crc kubenswrapper[4703]: I0130 12:23:31.134035 4703 generic.go:334] "Generic (PLEG): container finished" podID="227c7e91-67c4-46e3-b16d-ad10b3029e81" containerID="92850b060bbf8ffb73d47e793e6e821342ee89797c0fe93ddd7a38294997fc9c" exitCode=143 Jan 30 12:23:31 crc kubenswrapper[4703]: I0130 12:23:31.135385 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"227c7e91-67c4-46e3-b16d-ad10b3029e81","Type":"ContainerDied","Data":"92850b060bbf8ffb73d47e793e6e821342ee89797c0fe93ddd7a38294997fc9c"} Jan 30 12:23:32 crc kubenswrapper[4703]: I0130 12:23:32.538308 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 30 12:23:32 crc kubenswrapper[4703]: I0130 12:23:32.538400 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 30 12:23:32 crc kubenswrapper[4703]: I0130 12:23:32.695561 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 30 12:23:32 crc kubenswrapper[4703]: I0130 12:23:32.695632 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:23:32 crc kubenswrapper[4703]: I0130 12:23:32.750347 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 30 12:23:32 crc kubenswrapper[4703]: I0130 12:23:32.766744 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:23:32 crc kubenswrapper[4703]: I0130 12:23:32.883519 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 30 12:23:32 crc kubenswrapper[4703]: I0130 12:23:32.883667 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.008325 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.145763 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-mfcxn"] Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.146108 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn" podUID="2c2c4fcd-7779-44aa-81a2-d544a730b5e9" containerName="dnsmasq-dns" containerID="cri-o://ed78a9b7c581e15e5ea81353b6241ff6a032d90d74b485e73a2ff1778617ba01" gracePeriod=10 Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.253998 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.627369 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="391692f9-2c2e-48fc-8dda-74311b39393e" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.207:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.627459 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="391692f9-2c2e-48fc-8dda-74311b39393e" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.207:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.729360 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn" Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.853632 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-ovsdbserver-nb\") pod \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.853706 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-dns-svc\") pod \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.853819 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-config\") pod \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.853969 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2nxj\" (UniqueName: \"kubernetes.io/projected/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-kube-api-access-t2nxj\") pod \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.854046 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-dns-swift-storage-0\") pod \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.854201 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-ovsdbserver-sb\") pod \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\" (UID: \"2c2c4fcd-7779-44aa-81a2-d544a730b5e9\") " Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.870766 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-kube-api-access-t2nxj" (OuterVolumeSpecName: "kube-api-access-t2nxj") pod "2c2c4fcd-7779-44aa-81a2-d544a730b5e9" (UID: "2c2c4fcd-7779-44aa-81a2-d544a730b5e9"). InnerVolumeSpecName "kube-api-access-t2nxj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.925516 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2c2c4fcd-7779-44aa-81a2-d544a730b5e9" (UID: "2c2c4fcd-7779-44aa-81a2-d544a730b5e9"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.945857 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2c2c4fcd-7779-44aa-81a2-d544a730b5e9" (UID: "2c2c4fcd-7779-44aa-81a2-d544a730b5e9"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.960385 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2nxj\" (UniqueName: \"kubernetes.io/projected/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-kube-api-access-t2nxj\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.960414 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.960426 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.963372 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2c2c4fcd-7779-44aa-81a2-d544a730b5e9" (UID: "2c2c4fcd-7779-44aa-81a2-d544a730b5e9"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.965669 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2c2c4fcd-7779-44aa-81a2-d544a730b5e9" (UID: "2c2c4fcd-7779-44aa-81a2-d544a730b5e9"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:23:33 crc kubenswrapper[4703]: I0130 12:23:33.969402 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-config" (OuterVolumeSpecName: "config") pod "2c2c4fcd-7779-44aa-81a2-d544a730b5e9" (UID: "2c2c4fcd-7779-44aa-81a2-d544a730b5e9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:23:34 crc kubenswrapper[4703]: I0130 12:23:34.062251 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:34 crc kubenswrapper[4703]: I0130 12:23:34.062296 4703 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:34 crc kubenswrapper[4703]: I0130 12:23:34.062310 4703 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2c2c4fcd-7779-44aa-81a2-d544a730b5e9-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:34 crc kubenswrapper[4703]: I0130 12:23:34.209808 4703 generic.go:334] "Generic (PLEG): container finished" podID="e9321c04-45e3-40ed-9d9d-bd6b1221c265" containerID="be52965fa8e60a20c1c103d8fa852cd9477fa5939310e76616a4850e0e651e53" exitCode=1 Jan 30 12:23:34 crc kubenswrapper[4703]: I0130 12:23:34.209895 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e9321c04-45e3-40ed-9d9d-bd6b1221c265","Type":"ContainerDied","Data":"be52965fa8e60a20c1c103d8fa852cd9477fa5939310e76616a4850e0e651e53"} Jan 30 12:23:34 crc kubenswrapper[4703]: I0130 12:23:34.210694 4703 scope.go:117] "RemoveContainer" containerID="be52965fa8e60a20c1c103d8fa852cd9477fa5939310e76616a4850e0e651e53" Jan 30 12:23:34 crc kubenswrapper[4703]: I0130 12:23:34.223542 4703 generic.go:334] "Generic (PLEG): container finished" podID="2c2c4fcd-7779-44aa-81a2-d544a730b5e9" containerID="ed78a9b7c581e15e5ea81353b6241ff6a032d90d74b485e73a2ff1778617ba01" exitCode=0 Jan 30 12:23:34 crc kubenswrapper[4703]: I0130 12:23:34.223600 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn" event={"ID":"2c2c4fcd-7779-44aa-81a2-d544a730b5e9","Type":"ContainerDied","Data":"ed78a9b7c581e15e5ea81353b6241ff6a032d90d74b485e73a2ff1778617ba01"} Jan 30 12:23:34 crc kubenswrapper[4703]: I0130 12:23:34.223639 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn" event={"ID":"2c2c4fcd-7779-44aa-81a2-d544a730b5e9","Type":"ContainerDied","Data":"60b300be6a4ae9b2ec83187000942fd08ef37e22971a21c928d4237b093a5874"} Jan 30 12:23:34 crc kubenswrapper[4703]: I0130 12:23:34.223663 4703 scope.go:117] "RemoveContainer" containerID="ed78a9b7c581e15e5ea81353b6241ff6a032d90d74b485e73a2ff1778617ba01" Jan 30 12:23:34 crc kubenswrapper[4703]: I0130 12:23:34.223876 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c9776ccc5-mfcxn" Jan 30 12:23:34 crc kubenswrapper[4703]: I0130 12:23:34.298857 4703 scope.go:117] "RemoveContainer" containerID="11af69b92cae706c2908484b0a0c3b489b6f1f95bd8dd0a6006f861739282196" Jan 30 12:23:34 crc kubenswrapper[4703]: I0130 12:23:34.362092 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-mfcxn"] Jan 30 12:23:34 crc kubenswrapper[4703]: I0130 12:23:34.368690 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c9776ccc5-mfcxn"] Jan 30 12:23:34 crc kubenswrapper[4703]: I0130 12:23:34.376469 4703 scope.go:117] "RemoveContainer" containerID="ed78a9b7c581e15e5ea81353b6241ff6a032d90d74b485e73a2ff1778617ba01" Jan 30 12:23:34 crc kubenswrapper[4703]: E0130 12:23:34.382335 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ed78a9b7c581e15e5ea81353b6241ff6a032d90d74b485e73a2ff1778617ba01\": container with ID starting with ed78a9b7c581e15e5ea81353b6241ff6a032d90d74b485e73a2ff1778617ba01 not found: ID does not exist" containerID="ed78a9b7c581e15e5ea81353b6241ff6a032d90d74b485e73a2ff1778617ba01" Jan 30 12:23:34 crc kubenswrapper[4703]: I0130 12:23:34.382399 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ed78a9b7c581e15e5ea81353b6241ff6a032d90d74b485e73a2ff1778617ba01"} err="failed to get container status \"ed78a9b7c581e15e5ea81353b6241ff6a032d90d74b485e73a2ff1778617ba01\": rpc error: code = NotFound desc = could not find container \"ed78a9b7c581e15e5ea81353b6241ff6a032d90d74b485e73a2ff1778617ba01\": container with ID starting with ed78a9b7c581e15e5ea81353b6241ff6a032d90d74b485e73a2ff1778617ba01 not found: ID does not exist" Jan 30 12:23:34 crc kubenswrapper[4703]: I0130 12:23:34.382434 4703 scope.go:117] "RemoveContainer" containerID="11af69b92cae706c2908484b0a0c3b489b6f1f95bd8dd0a6006f861739282196" Jan 30 12:23:34 crc kubenswrapper[4703]: E0130 12:23:34.383379 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11af69b92cae706c2908484b0a0c3b489b6f1f95bd8dd0a6006f861739282196\": container with ID starting with 11af69b92cae706c2908484b0a0c3b489b6f1f95bd8dd0a6006f861739282196 not found: ID does not exist" containerID="11af69b92cae706c2908484b0a0c3b489b6f1f95bd8dd0a6006f861739282196" Jan 30 12:23:34 crc kubenswrapper[4703]: I0130 12:23:34.383467 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11af69b92cae706c2908484b0a0c3b489b6f1f95bd8dd0a6006f861739282196"} err="failed to get container status \"11af69b92cae706c2908484b0a0c3b489b6f1f95bd8dd0a6006f861739282196\": rpc error: code = NotFound desc = could not find container \"11af69b92cae706c2908484b0a0c3b489b6f1f95bd8dd0a6006f861739282196\": container with ID starting with 11af69b92cae706c2908484b0a0c3b489b6f1f95bd8dd0a6006f861739282196 not found: ID does not exist" Jan 30 12:23:35 crc kubenswrapper[4703]: I0130 12:23:35.101157 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c2c4fcd-7779-44aa-81a2-d544a730b5e9" path="/var/lib/kubelet/pods/2c2c4fcd-7779-44aa-81a2-d544a730b5e9/volumes" Jan 30 12:23:35 crc kubenswrapper[4703]: I0130 12:23:35.240303 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"e9321c04-45e3-40ed-9d9d-bd6b1221c265","Type":"ContainerStarted","Data":"2af9a742240b8092d3d291d859ef81f44136d991fe7dbdcae6ac841752ebb82d"} Jan 30 12:23:36 crc kubenswrapper[4703]: I0130 12:23:36.255265 4703 generic.go:334] "Generic (PLEG): container finished" podID="93b18574-a5a2-4317-b40d-eb021590ac96" containerID="f88fad3c745401aea84ad3bfafd365ae7f8cc354c2b585857b1eb141f5688b84" exitCode=0 Jan 30 12:23:36 crc kubenswrapper[4703]: I0130 12:23:36.255359 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-d9t7g" event={"ID":"93b18574-a5a2-4317-b40d-eb021590ac96","Type":"ContainerDied","Data":"f88fad3c745401aea84ad3bfafd365ae7f8cc354c2b585857b1eb141f5688b84"} Jan 30 12:23:37 crc kubenswrapper[4703]: I0130 12:23:37.270742 4703 generic.go:334] "Generic (PLEG): container finished" podID="40732e3a-846a-44ee-bf77-7c90e541af74" containerID="f07407a994b4eb22db96cc6f20185276cc4464eedc124843068a40cc0bc0699b" exitCode=0 Jan 30 12:23:37 crc kubenswrapper[4703]: I0130 12:23:37.270859 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-wbn8k" event={"ID":"40732e3a-846a-44ee-bf77-7c90e541af74","Type":"ContainerDied","Data":"f07407a994b4eb22db96cc6f20185276cc4464eedc124843068a40cc0bc0699b"} Jan 30 12:23:37 crc kubenswrapper[4703]: I0130 12:23:37.696223 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 30 12:23:37 crc kubenswrapper[4703]: I0130 12:23:37.785310 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-d9t7g" Jan 30 12:23:37 crc kubenswrapper[4703]: I0130 12:23:37.887606 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmt2j\" (UniqueName: \"kubernetes.io/projected/93b18574-a5a2-4317-b40d-eb021590ac96-kube-api-access-cmt2j\") pod \"93b18574-a5a2-4317-b40d-eb021590ac96\" (UID: \"93b18574-a5a2-4317-b40d-eb021590ac96\") " Jan 30 12:23:37 crc kubenswrapper[4703]: I0130 12:23:37.887818 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93b18574-a5a2-4317-b40d-eb021590ac96-config-data\") pod \"93b18574-a5a2-4317-b40d-eb021590ac96\" (UID: \"93b18574-a5a2-4317-b40d-eb021590ac96\") " Jan 30 12:23:37 crc kubenswrapper[4703]: I0130 12:23:37.887908 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93b18574-a5a2-4317-b40d-eb021590ac96-scripts\") pod \"93b18574-a5a2-4317-b40d-eb021590ac96\" (UID: \"93b18574-a5a2-4317-b40d-eb021590ac96\") " Jan 30 12:23:37 crc kubenswrapper[4703]: I0130 12:23:37.887990 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93b18574-a5a2-4317-b40d-eb021590ac96-combined-ca-bundle\") pod \"93b18574-a5a2-4317-b40d-eb021590ac96\" (UID: \"93b18574-a5a2-4317-b40d-eb021590ac96\") " Jan 30 12:23:37 crc kubenswrapper[4703]: I0130 12:23:37.896361 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93b18574-a5a2-4317-b40d-eb021590ac96-scripts" (OuterVolumeSpecName: "scripts") pod "93b18574-a5a2-4317-b40d-eb021590ac96" (UID: "93b18574-a5a2-4317-b40d-eb021590ac96"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:37 crc kubenswrapper[4703]: I0130 12:23:37.896760 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93b18574-a5a2-4317-b40d-eb021590ac96-kube-api-access-cmt2j" (OuterVolumeSpecName: "kube-api-access-cmt2j") pod "93b18574-a5a2-4317-b40d-eb021590ac96" (UID: "93b18574-a5a2-4317-b40d-eb021590ac96"). InnerVolumeSpecName "kube-api-access-cmt2j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:23:37 crc kubenswrapper[4703]: I0130 12:23:37.923763 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93b18574-a5a2-4317-b40d-eb021590ac96-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "93b18574-a5a2-4317-b40d-eb021590ac96" (UID: "93b18574-a5a2-4317-b40d-eb021590ac96"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:37 crc kubenswrapper[4703]: I0130 12:23:37.932717 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93b18574-a5a2-4317-b40d-eb021590ac96-config-data" (OuterVolumeSpecName: "config-data") pod "93b18574-a5a2-4317-b40d-eb021590ac96" (UID: "93b18574-a5a2-4317-b40d-eb021590ac96"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:37 crc kubenswrapper[4703]: I0130 12:23:37.990632 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93b18574-a5a2-4317-b40d-eb021590ac96-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:37 crc kubenswrapper[4703]: I0130 12:23:37.990681 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93b18574-a5a2-4317-b40d-eb021590ac96-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:37 crc kubenswrapper[4703]: I0130 12:23:37.990700 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmt2j\" (UniqueName: \"kubernetes.io/projected/93b18574-a5a2-4317-b40d-eb021590ac96-kube-api-access-cmt2j\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:37 crc kubenswrapper[4703]: I0130 12:23:37.990712 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93b18574-a5a2-4317-b40d-eb021590ac96-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.287013 4703 generic.go:334] "Generic (PLEG): container finished" podID="e9321c04-45e3-40ed-9d9d-bd6b1221c265" containerID="2af9a742240b8092d3d291d859ef81f44136d991fe7dbdcae6ac841752ebb82d" exitCode=1 Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.287109 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e9321c04-45e3-40ed-9d9d-bd6b1221c265","Type":"ContainerDied","Data":"2af9a742240b8092d3d291d859ef81f44136d991fe7dbdcae6ac841752ebb82d"} Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.287517 4703 scope.go:117] "RemoveContainer" containerID="be52965fa8e60a20c1c103d8fa852cd9477fa5939310e76616a4850e0e651e53" Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.288707 4703 scope.go:117] "RemoveContainer" containerID="2af9a742240b8092d3d291d859ef81f44136d991fe7dbdcae6ac841752ebb82d" Jan 30 12:23:38 crc kubenswrapper[4703]: E0130 12:23:38.289192 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 10s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(e9321c04-45e3-40ed-9d9d-bd6b1221c265)\"" pod="openstack/nova-scheduler-0" podUID="e9321c04-45e3-40ed-9d9d-bd6b1221c265" Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.290795 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-d9t7g" Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.290788 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-d9t7g" event={"ID":"93b18574-a5a2-4317-b40d-eb021590ac96","Type":"ContainerDied","Data":"4fa3439ac44c3c03ea1c5bc34a09423b78a19d73aee17731c80fda97a36c4599"} Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.290850 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4fa3439ac44c3c03ea1c5bc34a09423b78a19d73aee17731c80fda97a36c4599" Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.514077 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.514388 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="391692f9-2c2e-48fc-8dda-74311b39393e" containerName="nova-api-log" containerID="cri-o://45c91da213234652ac10d900a6d444f01c737376a70a5dab1fb2929789e7ce41" gracePeriod=30 Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.514780 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="391692f9-2c2e-48fc-8dda-74311b39393e" containerName="nova-api-api" containerID="cri-o://3e4358d211f700b275342fafeb5a4e3fd696724e2f5dffd3f16d085af9de3db0" gracePeriod=30 Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.533982 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.643590 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.829331 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-wbn8k" Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.917637 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40732e3a-846a-44ee-bf77-7c90e541af74-combined-ca-bundle\") pod \"40732e3a-846a-44ee-bf77-7c90e541af74\" (UID: \"40732e3a-846a-44ee-bf77-7c90e541af74\") " Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.917948 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rspp\" (UniqueName: \"kubernetes.io/projected/40732e3a-846a-44ee-bf77-7c90e541af74-kube-api-access-8rspp\") pod \"40732e3a-846a-44ee-bf77-7c90e541af74\" (UID: \"40732e3a-846a-44ee-bf77-7c90e541af74\") " Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.918033 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40732e3a-846a-44ee-bf77-7c90e541af74-config-data\") pod \"40732e3a-846a-44ee-bf77-7c90e541af74\" (UID: \"40732e3a-846a-44ee-bf77-7c90e541af74\") " Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.918139 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40732e3a-846a-44ee-bf77-7c90e541af74-scripts\") pod \"40732e3a-846a-44ee-bf77-7c90e541af74\" (UID: \"40732e3a-846a-44ee-bf77-7c90e541af74\") " Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.926190 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40732e3a-846a-44ee-bf77-7c90e541af74-scripts" (OuterVolumeSpecName: "scripts") pod "40732e3a-846a-44ee-bf77-7c90e541af74" (UID: "40732e3a-846a-44ee-bf77-7c90e541af74"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.926906 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40732e3a-846a-44ee-bf77-7c90e541af74-kube-api-access-8rspp" (OuterVolumeSpecName: "kube-api-access-8rspp") pod "40732e3a-846a-44ee-bf77-7c90e541af74" (UID: "40732e3a-846a-44ee-bf77-7c90e541af74"). InnerVolumeSpecName "kube-api-access-8rspp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.956730 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40732e3a-846a-44ee-bf77-7c90e541af74-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "40732e3a-846a-44ee-bf77-7c90e541af74" (UID: "40732e3a-846a-44ee-bf77-7c90e541af74"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:38 crc kubenswrapper[4703]: I0130 12:23:38.971115 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/40732e3a-846a-44ee-bf77-7c90e541af74-config-data" (OuterVolumeSpecName: "config-data") pod "40732e3a-846a-44ee-bf77-7c90e541af74" (UID: "40732e3a-846a-44ee-bf77-7c90e541af74"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.021069 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rspp\" (UniqueName: \"kubernetes.io/projected/40732e3a-846a-44ee-bf77-7c90e541af74-kube-api-access-8rspp\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.021111 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/40732e3a-846a-44ee-bf77-7c90e541af74-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.021139 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/40732e3a-846a-44ee-bf77-7c90e541af74-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.021148 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/40732e3a-846a-44ee-bf77-7c90e541af74-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.308311 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-wbn8k" event={"ID":"40732e3a-846a-44ee-bf77-7c90e541af74","Type":"ContainerDied","Data":"50cd798d7b56ed2da2e633bf1b6584d0edecebb6e87c1195e3f8f5dec94b9756"} Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.308374 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="50cd798d7b56ed2da2e633bf1b6584d0edecebb6e87c1195e3f8f5dec94b9756" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.308381 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-wbn8k" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.316655 4703 generic.go:334] "Generic (PLEG): container finished" podID="391692f9-2c2e-48fc-8dda-74311b39393e" containerID="45c91da213234652ac10d900a6d444f01c737376a70a5dab1fb2929789e7ce41" exitCode=143 Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.316760 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"391692f9-2c2e-48fc-8dda-74311b39393e","Type":"ContainerDied","Data":"45c91da213234652ac10d900a6d444f01c737376a70a5dab1fb2929789e7ce41"} Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.484089 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 30 12:23:39 crc kubenswrapper[4703]: E0130 12:23:39.492570 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c2c4fcd-7779-44aa-81a2-d544a730b5e9" containerName="init" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.492616 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c2c4fcd-7779-44aa-81a2-d544a730b5e9" containerName="init" Jan 30 12:23:39 crc kubenswrapper[4703]: E0130 12:23:39.492628 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40732e3a-846a-44ee-bf77-7c90e541af74" containerName="nova-cell1-conductor-db-sync" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.492635 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="40732e3a-846a-44ee-bf77-7c90e541af74" containerName="nova-cell1-conductor-db-sync" Jan 30 12:23:39 crc kubenswrapper[4703]: E0130 12:23:39.492654 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c2c4fcd-7779-44aa-81a2-d544a730b5e9" 
containerName="dnsmasq-dns" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.492660 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c2c4fcd-7779-44aa-81a2-d544a730b5e9" containerName="dnsmasq-dns" Jan 30 12:23:39 crc kubenswrapper[4703]: E0130 12:23:39.492671 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93b18574-a5a2-4317-b40d-eb021590ac96" containerName="nova-manage" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.492677 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="93b18574-a5a2-4317-b40d-eb021590ac96" containerName="nova-manage" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.492887 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="40732e3a-846a-44ee-bf77-7c90e541af74" containerName="nova-cell1-conductor-db-sync" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.492921 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="93b18574-a5a2-4317-b40d-eb021590ac96" containerName="nova-manage" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.492932 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c2c4fcd-7779-44aa-81a2-d544a730b5e9" containerName="dnsmasq-dns" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.493826 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.498631 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.516207 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.640434 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rbtc\" (UniqueName: \"kubernetes.io/projected/db36ff33-3188-4e4b-ab14-8201ea11f938-kube-api-access-5rbtc\") pod \"nova-cell1-conductor-0\" (UID: \"db36ff33-3188-4e4b-ab14-8201ea11f938\") " pod="openstack/nova-cell1-conductor-0" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.641085 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db36ff33-3188-4e4b-ab14-8201ea11f938-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"db36ff33-3188-4e4b-ab14-8201ea11f938\") " pod="openstack/nova-cell1-conductor-0" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.641309 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db36ff33-3188-4e4b-ab14-8201ea11f938-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"db36ff33-3188-4e4b-ab14-8201ea11f938\") " pod="openstack/nova-cell1-conductor-0" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.744581 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rbtc\" (UniqueName: \"kubernetes.io/projected/db36ff33-3188-4e4b-ab14-8201ea11f938-kube-api-access-5rbtc\") pod \"nova-cell1-conductor-0\" (UID: \"db36ff33-3188-4e4b-ab14-8201ea11f938\") " pod="openstack/nova-cell1-conductor-0" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.744672 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/db36ff33-3188-4e4b-ab14-8201ea11f938-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"db36ff33-3188-4e4b-ab14-8201ea11f938\") " pod="openstack/nova-cell1-conductor-0" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.744725 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db36ff33-3188-4e4b-ab14-8201ea11f938-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"db36ff33-3188-4e4b-ab14-8201ea11f938\") " pod="openstack/nova-cell1-conductor-0" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.750382 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db36ff33-3188-4e4b-ab14-8201ea11f938-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"db36ff33-3188-4e4b-ab14-8201ea11f938\") " pod="openstack/nova-cell1-conductor-0" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.755028 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db36ff33-3188-4e4b-ab14-8201ea11f938-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"db36ff33-3188-4e4b-ab14-8201ea11f938\") " pod="openstack/nova-cell1-conductor-0" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.763184 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rbtc\" (UniqueName: \"kubernetes.io/projected/db36ff33-3188-4e4b-ab14-8201ea11f938-kube-api-access-5rbtc\") pod \"nova-cell1-conductor-0\" (UID: \"db36ff33-3188-4e4b-ab14-8201ea11f938\") " pod="openstack/nova-cell1-conductor-0" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.816018 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 30 12:23:39 crc kubenswrapper[4703]: I0130 12:23:39.987226 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.053961 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pnmhm\" (UniqueName: \"kubernetes.io/projected/e9321c04-45e3-40ed-9d9d-bd6b1221c265-kube-api-access-pnmhm\") pod \"e9321c04-45e3-40ed-9d9d-bd6b1221c265\" (UID: \"e9321c04-45e3-40ed-9d9d-bd6b1221c265\") " Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.054074 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9321c04-45e3-40ed-9d9d-bd6b1221c265-config-data\") pod \"e9321c04-45e3-40ed-9d9d-bd6b1221c265\" (UID: \"e9321c04-45e3-40ed-9d9d-bd6b1221c265\") " Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.054184 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9321c04-45e3-40ed-9d9d-bd6b1221c265-combined-ca-bundle\") pod \"e9321c04-45e3-40ed-9d9d-bd6b1221c265\" (UID: \"e9321c04-45e3-40ed-9d9d-bd6b1221c265\") " Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.082096 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9321c04-45e3-40ed-9d9d-bd6b1221c265-kube-api-access-pnmhm" (OuterVolumeSpecName: "kube-api-access-pnmhm") pod "e9321c04-45e3-40ed-9d9d-bd6b1221c265" (UID: "e9321c04-45e3-40ed-9d9d-bd6b1221c265"). InnerVolumeSpecName "kube-api-access-pnmhm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.100027 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9321c04-45e3-40ed-9d9d-bd6b1221c265-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e9321c04-45e3-40ed-9d9d-bd6b1221c265" (UID: "e9321c04-45e3-40ed-9d9d-bd6b1221c265"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.102777 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9321c04-45e3-40ed-9d9d-bd6b1221c265-config-data" (OuterVolumeSpecName: "config-data") pod "e9321c04-45e3-40ed-9d9d-bd6b1221c265" (UID: "e9321c04-45e3-40ed-9d9d-bd6b1221c265"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.160021 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pnmhm\" (UniqueName: \"kubernetes.io/projected/e9321c04-45e3-40ed-9d9d-bd6b1221c265-kube-api-access-pnmhm\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.160061 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9321c04-45e3-40ed-9d9d-bd6b1221c265-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.160078 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9321c04-45e3-40ed-9d9d-bd6b1221c265-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.331707 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e9321c04-45e3-40ed-9d9d-bd6b1221c265","Type":"ContainerDied","Data":"bdd52643fe89ee144f7b0a5b3d317edb59be2df3abdc96d1ce35f368e61f3a38"} Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.331784 4703 scope.go:117] "RemoveContainer" containerID="2af9a742240b8092d3d291d859ef81f44136d991fe7dbdcae6ac841752ebb82d" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.331891 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.401110 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.414130 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.429809 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.448660 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 30 12:23:40 crc kubenswrapper[4703]: E0130 12:23:40.449513 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9321c04-45e3-40ed-9d9d-bd6b1221c265" containerName="nova-scheduler-scheduler" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.449550 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9321c04-45e3-40ed-9d9d-bd6b1221c265" containerName="nova-scheduler-scheduler" Jan 30 12:23:40 crc kubenswrapper[4703]: E0130 12:23:40.449568 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9321c04-45e3-40ed-9d9d-bd6b1221c265" containerName="nova-scheduler-scheduler" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.449577 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9321c04-45e3-40ed-9d9d-bd6b1221c265" containerName="nova-scheduler-scheduler" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.449881 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9321c04-45e3-40ed-9d9d-bd6b1221c265" containerName="nova-scheduler-scheduler" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.449903 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9321c04-45e3-40ed-9d9d-bd6b1221c265" containerName="nova-scheduler-scheduler" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.450974 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.454680 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.473481 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.580408 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5w55\" (UniqueName: \"kubernetes.io/projected/04da2786-8d07-4399-9200-95c1f596a97b-kube-api-access-h5w55\") pod \"nova-scheduler-0\" (UID: \"04da2786-8d07-4399-9200-95c1f596a97b\") " pod="openstack/nova-scheduler-0" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.580591 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04da2786-8d07-4399-9200-95c1f596a97b-config-data\") pod \"nova-scheduler-0\" (UID: \"04da2786-8d07-4399-9200-95c1f596a97b\") " pod="openstack/nova-scheduler-0" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.580735 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04da2786-8d07-4399-9200-95c1f596a97b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"04da2786-8d07-4399-9200-95c1f596a97b\") " pod="openstack/nova-scheduler-0" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.683057 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04da2786-8d07-4399-9200-95c1f596a97b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"04da2786-8d07-4399-9200-95c1f596a97b\") " pod="openstack/nova-scheduler-0" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.683931 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5w55\" (UniqueName: \"kubernetes.io/projected/04da2786-8d07-4399-9200-95c1f596a97b-kube-api-access-h5w55\") pod \"nova-scheduler-0\" (UID: \"04da2786-8d07-4399-9200-95c1f596a97b\") " pod="openstack/nova-scheduler-0" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.684078 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04da2786-8d07-4399-9200-95c1f596a97b-config-data\") pod \"nova-scheduler-0\" (UID: \"04da2786-8d07-4399-9200-95c1f596a97b\") " pod="openstack/nova-scheduler-0" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.688833 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04da2786-8d07-4399-9200-95c1f596a97b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"04da2786-8d07-4399-9200-95c1f596a97b\") " pod="openstack/nova-scheduler-0" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.689290 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04da2786-8d07-4399-9200-95c1f596a97b-config-data\") pod \"nova-scheduler-0\" (UID: \"04da2786-8d07-4399-9200-95c1f596a97b\") " pod="openstack/nova-scheduler-0" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.703850 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5w55\" (UniqueName: 
\"kubernetes.io/projected/04da2786-8d07-4399-9200-95c1f596a97b-kube-api-access-h5w55\") pod \"nova-scheduler-0\" (UID: \"04da2786-8d07-4399-9200-95c1f596a97b\") " pod="openstack/nova-scheduler-0" Jan 30 12:23:40 crc kubenswrapper[4703]: I0130 12:23:40.778392 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 30 12:23:41 crc kubenswrapper[4703]: I0130 12:23:41.120865 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9321c04-45e3-40ed-9d9d-bd6b1221c265" path="/var/lib/kubelet/pods/e9321c04-45e3-40ed-9d9d-bd6b1221c265/volumes" Jan 30 12:23:41 crc kubenswrapper[4703]: I0130 12:23:41.290729 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 30 12:23:41 crc kubenswrapper[4703]: I0130 12:23:41.375201 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"04da2786-8d07-4399-9200-95c1f596a97b","Type":"ContainerStarted","Data":"aeb85865225d4f900bcc240fa194117ccdc51f4bc0f9a2a2a59d8c6c225c10ea"} Jan 30 12:23:41 crc kubenswrapper[4703]: I0130 12:23:41.377665 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"db36ff33-3188-4e4b-ab14-8201ea11f938","Type":"ContainerStarted","Data":"d20ae5d831904df8056bcdbaaf9111664a2b8f50d471495a80242ec701a94dd0"} Jan 30 12:23:41 crc kubenswrapper[4703]: I0130 12:23:41.377693 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"db36ff33-3188-4e4b-ab14-8201ea11f938","Type":"ContainerStarted","Data":"1ebe9838939595fa012f677f505595f3708968c8d2e407437ed8ed12bee2b1eb"} Jan 30 12:23:41 crc kubenswrapper[4703]: I0130 12:23:41.377928 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Jan 30 12:23:41 crc kubenswrapper[4703]: I0130 12:23:41.407710 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.407678786 podStartE2EDuration="2.407678786s" podCreationTimestamp="2026-01-30 12:23:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:23:41.396418865 +0000 UTC m=+1657.174240529" watchObservedRunningTime="2026-01-30 12:23:41.407678786 +0000 UTC m=+1657.185500430" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.297447 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.395458 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"04da2786-8d07-4399-9200-95c1f596a97b","Type":"ContainerStarted","Data":"2e07257590556a5cac2a62522e144e89374ce50945d49e9e874c10ec043c1e8a"} Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.398418 4703 generic.go:334] "Generic (PLEG): container finished" podID="391692f9-2c2e-48fc-8dda-74311b39393e" containerID="3e4358d211f700b275342fafeb5a4e3fd696724e2f5dffd3f16d085af9de3db0" exitCode=0 Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.398501 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.398531 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"391692f9-2c2e-48fc-8dda-74311b39393e","Type":"ContainerDied","Data":"3e4358d211f700b275342fafeb5a4e3fd696724e2f5dffd3f16d085af9de3db0"} Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.398573 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"391692f9-2c2e-48fc-8dda-74311b39393e","Type":"ContainerDied","Data":"98d7dd5cddd6a8504e3443492b1c17d866c7fb686b0269fdafe2ed9e3aa35ca5"} Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.398600 4703 scope.go:117] "RemoveContainer" containerID="3e4358d211f700b275342fafeb5a4e3fd696724e2f5dffd3f16d085af9de3db0" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.418611 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.418589376 podStartE2EDuration="2.418589376s" podCreationTimestamp="2026-01-30 12:23:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:23:42.413718536 +0000 UTC m=+1658.191540200" watchObservedRunningTime="2026-01-30 12:23:42.418589376 +0000 UTC m=+1658.196411030" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.444188 4703 scope.go:117] "RemoveContainer" containerID="45c91da213234652ac10d900a6d444f01c737376a70a5dab1fb2929789e7ce41" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.444258 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/391692f9-2c2e-48fc-8dda-74311b39393e-logs\") pod \"391692f9-2c2e-48fc-8dda-74311b39393e\" (UID: \"391692f9-2c2e-48fc-8dda-74311b39393e\") " Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.444486 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/391692f9-2c2e-48fc-8dda-74311b39393e-combined-ca-bundle\") pod \"391692f9-2c2e-48fc-8dda-74311b39393e\" (UID: \"391692f9-2c2e-48fc-8dda-74311b39393e\") " Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.444619 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rdbxn\" (UniqueName: \"kubernetes.io/projected/391692f9-2c2e-48fc-8dda-74311b39393e-kube-api-access-rdbxn\") pod \"391692f9-2c2e-48fc-8dda-74311b39393e\" (UID: \"391692f9-2c2e-48fc-8dda-74311b39393e\") " Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.444705 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/391692f9-2c2e-48fc-8dda-74311b39393e-config-data\") pod \"391692f9-2c2e-48fc-8dda-74311b39393e\" (UID: \"391692f9-2c2e-48fc-8dda-74311b39393e\") " Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.444906 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/391692f9-2c2e-48fc-8dda-74311b39393e-logs" (OuterVolumeSpecName: "logs") pod "391692f9-2c2e-48fc-8dda-74311b39393e" (UID: "391692f9-2c2e-48fc-8dda-74311b39393e"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.445329 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/391692f9-2c2e-48fc-8dda-74311b39393e-logs\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.468460 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/391692f9-2c2e-48fc-8dda-74311b39393e-kube-api-access-rdbxn" (OuterVolumeSpecName: "kube-api-access-rdbxn") pod "391692f9-2c2e-48fc-8dda-74311b39393e" (UID: "391692f9-2c2e-48fc-8dda-74311b39393e"). InnerVolumeSpecName "kube-api-access-rdbxn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.502341 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/391692f9-2c2e-48fc-8dda-74311b39393e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "391692f9-2c2e-48fc-8dda-74311b39393e" (UID: "391692f9-2c2e-48fc-8dda-74311b39393e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.516746 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/391692f9-2c2e-48fc-8dda-74311b39393e-config-data" (OuterVolumeSpecName: "config-data") pod "391692f9-2c2e-48fc-8dda-74311b39393e" (UID: "391692f9-2c2e-48fc-8dda-74311b39393e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.549378 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/391692f9-2c2e-48fc-8dda-74311b39393e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.549425 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rdbxn\" (UniqueName: \"kubernetes.io/projected/391692f9-2c2e-48fc-8dda-74311b39393e-kube-api-access-rdbxn\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.549442 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/391692f9-2c2e-48fc-8dda-74311b39393e-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.592585 4703 scope.go:117] "RemoveContainer" containerID="3e4358d211f700b275342fafeb5a4e3fd696724e2f5dffd3f16d085af9de3db0" Jan 30 12:23:42 crc kubenswrapper[4703]: E0130 12:23:42.593383 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e4358d211f700b275342fafeb5a4e3fd696724e2f5dffd3f16d085af9de3db0\": container with ID starting with 3e4358d211f700b275342fafeb5a4e3fd696724e2f5dffd3f16d085af9de3db0 not found: ID does not exist" containerID="3e4358d211f700b275342fafeb5a4e3fd696724e2f5dffd3f16d085af9de3db0" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.593459 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e4358d211f700b275342fafeb5a4e3fd696724e2f5dffd3f16d085af9de3db0"} err="failed to get container status \"3e4358d211f700b275342fafeb5a4e3fd696724e2f5dffd3f16d085af9de3db0\": rpc error: code = NotFound desc = could not find container 
\"3e4358d211f700b275342fafeb5a4e3fd696724e2f5dffd3f16d085af9de3db0\": container with ID starting with 3e4358d211f700b275342fafeb5a4e3fd696724e2f5dffd3f16d085af9de3db0 not found: ID does not exist" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.593501 4703 scope.go:117] "RemoveContainer" containerID="45c91da213234652ac10d900a6d444f01c737376a70a5dab1fb2929789e7ce41" Jan 30 12:23:42 crc kubenswrapper[4703]: E0130 12:23:42.593997 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45c91da213234652ac10d900a6d444f01c737376a70a5dab1fb2929789e7ce41\": container with ID starting with 45c91da213234652ac10d900a6d444f01c737376a70a5dab1fb2929789e7ce41 not found: ID does not exist" containerID="45c91da213234652ac10d900a6d444f01c737376a70a5dab1fb2929789e7ce41" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.594036 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45c91da213234652ac10d900a6d444f01c737376a70a5dab1fb2929789e7ce41"} err="failed to get container status \"45c91da213234652ac10d900a6d444f01c737376a70a5dab1fb2929789e7ce41\": rpc error: code = NotFound desc = could not find container \"45c91da213234652ac10d900a6d444f01c737376a70a5dab1fb2929789e7ce41\": container with ID starting with 45c91da213234652ac10d900a6d444f01c737376a70a5dab1fb2929789e7ce41 not found: ID does not exist" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.740313 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.757073 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.803644 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 30 12:23:42 crc kubenswrapper[4703]: E0130 12:23:42.804920 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="391692f9-2c2e-48fc-8dda-74311b39393e" containerName="nova-api-log" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.804979 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="391692f9-2c2e-48fc-8dda-74311b39393e" containerName="nova-api-log" Jan 30 12:23:42 crc kubenswrapper[4703]: E0130 12:23:42.805142 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="391692f9-2c2e-48fc-8dda-74311b39393e" containerName="nova-api-api" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.805162 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="391692f9-2c2e-48fc-8dda-74311b39393e" containerName="nova-api-api" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.805944 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="391692f9-2c2e-48fc-8dda-74311b39393e" containerName="nova-api-api" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.806001 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="391692f9-2c2e-48fc-8dda-74311b39393e" containerName="nova-api-log" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.808376 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.816852 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.829952 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.830047 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.880294 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.961480 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0763380e-e32e-44b0-af83-61e43f0b5eeb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0763380e-e32e-44b0-af83-61e43f0b5eeb\") " pod="openstack/nova-api-0" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.961550 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0763380e-e32e-44b0-af83-61e43f0b5eeb-logs\") pod \"nova-api-0\" (UID: \"0763380e-e32e-44b0-af83-61e43f0b5eeb\") " pod="openstack/nova-api-0" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.961593 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0763380e-e32e-44b0-af83-61e43f0b5eeb-config-data\") pod \"nova-api-0\" (UID: \"0763380e-e32e-44b0-af83-61e43f0b5eeb\") " pod="openstack/nova-api-0" Jan 30 12:23:42 crc kubenswrapper[4703]: I0130 12:23:42.961646 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-td7m9\" (UniqueName: \"kubernetes.io/projected/0763380e-e32e-44b0-af83-61e43f0b5eeb-kube-api-access-td7m9\") pod \"nova-api-0\" (UID: \"0763380e-e32e-44b0-af83-61e43f0b5eeb\") " pod="openstack/nova-api-0" Jan 30 12:23:43 crc kubenswrapper[4703]: I0130 12:23:43.063771 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0763380e-e32e-44b0-af83-61e43f0b5eeb-config-data\") pod \"nova-api-0\" (UID: \"0763380e-e32e-44b0-af83-61e43f0b5eeb\") " pod="openstack/nova-api-0" Jan 30 12:23:43 crc kubenswrapper[4703]: I0130 12:23:43.064242 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-td7m9\" (UniqueName: \"kubernetes.io/projected/0763380e-e32e-44b0-af83-61e43f0b5eeb-kube-api-access-td7m9\") pod \"nova-api-0\" (UID: \"0763380e-e32e-44b0-af83-61e43f0b5eeb\") " pod="openstack/nova-api-0" Jan 30 12:23:43 crc kubenswrapper[4703]: I0130 12:23:43.064385 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0763380e-e32e-44b0-af83-61e43f0b5eeb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0763380e-e32e-44b0-af83-61e43f0b5eeb\") " pod="openstack/nova-api-0" Jan 30 12:23:43 crc kubenswrapper[4703]: I0130 12:23:43.064434 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0763380e-e32e-44b0-af83-61e43f0b5eeb-logs\") pod \"nova-api-0\" (UID: \"0763380e-e32e-44b0-af83-61e43f0b5eeb\") " pod="openstack/nova-api-0" Jan 30 12:23:43 crc kubenswrapper[4703]: I0130 12:23:43.064793 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0763380e-e32e-44b0-af83-61e43f0b5eeb-logs\") pod \"nova-api-0\" (UID: \"0763380e-e32e-44b0-af83-61e43f0b5eeb\") " pod="openstack/nova-api-0" Jan 30 12:23:43 crc kubenswrapper[4703]: I0130 12:23:43.069748 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0763380e-e32e-44b0-af83-61e43f0b5eeb-config-data\") pod \"nova-api-0\" (UID: \"0763380e-e32e-44b0-af83-61e43f0b5eeb\") " pod="openstack/nova-api-0" Jan 30 12:23:43 crc kubenswrapper[4703]: I0130 12:23:43.085050 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0763380e-e32e-44b0-af83-61e43f0b5eeb-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0763380e-e32e-44b0-af83-61e43f0b5eeb\") " pod="openstack/nova-api-0" Jan 30 12:23:43 crc kubenswrapper[4703]: I0130 12:23:43.098846 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-td7m9\" (UniqueName: \"kubernetes.io/projected/0763380e-e32e-44b0-af83-61e43f0b5eeb-kube-api-access-td7m9\") pod \"nova-api-0\" (UID: \"0763380e-e32e-44b0-af83-61e43f0b5eeb\") " pod="openstack/nova-api-0" Jan 30 12:23:43 crc kubenswrapper[4703]: I0130 12:23:43.121944 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="391692f9-2c2e-48fc-8dda-74311b39393e" path="/var/lib/kubelet/pods/391692f9-2c2e-48fc-8dda-74311b39393e/volumes" Jan 30 12:23:43 crc kubenswrapper[4703]: I0130 12:23:43.137213 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 30 12:23:44 crc kubenswrapper[4703]: I0130 12:23:43.748913 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 30 12:23:44 crc kubenswrapper[4703]: W0130 12:23:43.763283 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0763380e_e32e_44b0_af83_61e43f0b5eeb.slice/crio-91fd62b45ad39c550162b9bb41534f147eebe30ccf8091f10445dfd79614947f WatchSource:0}: Error finding container 91fd62b45ad39c550162b9bb41534f147eebe30ccf8091f10445dfd79614947f: Status 404 returned error can't find the container with id 91fd62b45ad39c550162b9bb41534f147eebe30ccf8091f10445dfd79614947f Jan 30 12:23:44 crc kubenswrapper[4703]: I0130 12:23:44.370836 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 30 12:23:44 crc kubenswrapper[4703]: I0130 12:23:44.371680 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="7847f607-512e-440a-af08-8fc3104621b6" containerName="kube-state-metrics" containerID="cri-o://15669b51c5fe1aa82ec8da8d695ffce6ef991203cc9db102d30a7a7359dec759" gracePeriod=30 Jan 30 12:23:44 crc kubenswrapper[4703]: I0130 12:23:44.454537 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0763380e-e32e-44b0-af83-61e43f0b5eeb","Type":"ContainerStarted","Data":"eab36726edd9a9c79d9f4785e026c0dd0d0cb555bab1949db44ffe7cc6b5b302"} Jan 30 12:23:44 crc kubenswrapper[4703]: I0130 12:23:44.454606 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0763380e-e32e-44b0-af83-61e43f0b5eeb","Type":"ContainerStarted","Data":"91fd62b45ad39c550162b9bb41534f147eebe30ccf8091f10445dfd79614947f"} Jan 30 12:23:44 crc kubenswrapper[4703]: I0130 12:23:44.954882 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.136607 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2cbff\" (UniqueName: \"kubernetes.io/projected/7847f607-512e-440a-af08-8fc3104621b6-kube-api-access-2cbff\") pod \"7847f607-512e-440a-af08-8fc3104621b6\" (UID: \"7847f607-512e-440a-af08-8fc3104621b6\") " Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.146442 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7847f607-512e-440a-af08-8fc3104621b6-kube-api-access-2cbff" (OuterVolumeSpecName: "kube-api-access-2cbff") pod "7847f607-512e-440a-af08-8fc3104621b6" (UID: "7847f607-512e-440a-af08-8fc3104621b6"). InnerVolumeSpecName "kube-api-access-2cbff". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.240625 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2cbff\" (UniqueName: \"kubernetes.io/projected/7847f607-512e-440a-af08-8fc3104621b6-kube-api-access-2cbff\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.467030 4703 generic.go:334] "Generic (PLEG): container finished" podID="7847f607-512e-440a-af08-8fc3104621b6" containerID="15669b51c5fe1aa82ec8da8d695ffce6ef991203cc9db102d30a7a7359dec759" exitCode=2 Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.467094 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.467134 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"7847f607-512e-440a-af08-8fc3104621b6","Type":"ContainerDied","Data":"15669b51c5fe1aa82ec8da8d695ffce6ef991203cc9db102d30a7a7359dec759"} Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.467195 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"7847f607-512e-440a-af08-8fc3104621b6","Type":"ContainerDied","Data":"5b453f412f4d9bec9b19ade585ee14310446e6d5fca143a0b323c638dcd53e9c"} Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.467223 4703 scope.go:117] "RemoveContainer" containerID="15669b51c5fe1aa82ec8da8d695ffce6ef991203cc9db102d30a7a7359dec759" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.471557 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0763380e-e32e-44b0-af83-61e43f0b5eeb","Type":"ContainerStarted","Data":"b3aaab44e4e2c7a1b909855bc0a361d54ac0cddbfaabcd07b5ad7dc49bb16a01"} Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.501694 4703 scope.go:117] "RemoveContainer" containerID="15669b51c5fe1aa82ec8da8d695ffce6ef991203cc9db102d30a7a7359dec759" Jan 30 12:23:45 crc kubenswrapper[4703]: E0130 12:23:45.502483 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15669b51c5fe1aa82ec8da8d695ffce6ef991203cc9db102d30a7a7359dec759\": container with ID starting with 15669b51c5fe1aa82ec8da8d695ffce6ef991203cc9db102d30a7a7359dec759 not found: ID does not exist" containerID="15669b51c5fe1aa82ec8da8d695ffce6ef991203cc9db102d30a7a7359dec759" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.502567 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15669b51c5fe1aa82ec8da8d695ffce6ef991203cc9db102d30a7a7359dec759"} err="failed to get container status \"15669b51c5fe1aa82ec8da8d695ffce6ef991203cc9db102d30a7a7359dec759\": rpc error: code = NotFound desc = could not find container \"15669b51c5fe1aa82ec8da8d695ffce6ef991203cc9db102d30a7a7359dec759\": container with ID starting with 15669b51c5fe1aa82ec8da8d695ffce6ef991203cc9db102d30a7a7359dec759 not found: ID does not exist" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.511510 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.5114823189999997 podStartE2EDuration="3.511482319s" podCreationTimestamp="2026-01-30 12:23:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:23:45.506304891 +0000 UTC m=+1661.284126545" watchObservedRunningTime="2026-01-30 12:23:45.511482319 +0000 UTC m=+1661.289303973" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.537376 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.552218 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.564239 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 30 12:23:45 crc kubenswrapper[4703]: E0130 12:23:45.565593 4703 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="7847f607-512e-440a-af08-8fc3104621b6" containerName="kube-state-metrics" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.565624 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="7847f607-512e-440a-af08-8fc3104621b6" containerName="kube-state-metrics" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.565919 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="7847f607-512e-440a-af08-8fc3104621b6" containerName="kube-state-metrics" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.567084 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.571824 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.572229 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.581487 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.752824 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tstkx\" (UniqueName: \"kubernetes.io/projected/18ac36a5-1494-44b5-9a0a-cd275513992a-kube-api-access-tstkx\") pod \"kube-state-metrics-0\" (UID: \"18ac36a5-1494-44b5-9a0a-cd275513992a\") " pod="openstack/kube-state-metrics-0" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.752887 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/18ac36a5-1494-44b5-9a0a-cd275513992a-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"18ac36a5-1494-44b5-9a0a-cd275513992a\") " pod="openstack/kube-state-metrics-0" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.752980 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/18ac36a5-1494-44b5-9a0a-cd275513992a-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"18ac36a5-1494-44b5-9a0a-cd275513992a\") " pod="openstack/kube-state-metrics-0" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.753045 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18ac36a5-1494-44b5-9a0a-cd275513992a-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"18ac36a5-1494-44b5-9a0a-cd275513992a\") " pod="openstack/kube-state-metrics-0" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.784994 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.856157 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/18ac36a5-1494-44b5-9a0a-cd275513992a-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"18ac36a5-1494-44b5-9a0a-cd275513992a\") " pod="openstack/kube-state-metrics-0" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.856226 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/18ac36a5-1494-44b5-9a0a-cd275513992a-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"18ac36a5-1494-44b5-9a0a-cd275513992a\") " pod="openstack/kube-state-metrics-0" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.856413 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tstkx\" (UniqueName: \"kubernetes.io/projected/18ac36a5-1494-44b5-9a0a-cd275513992a-kube-api-access-tstkx\") pod \"kube-state-metrics-0\" (UID: \"18ac36a5-1494-44b5-9a0a-cd275513992a\") " pod="openstack/kube-state-metrics-0" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.856458 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/18ac36a5-1494-44b5-9a0a-cd275513992a-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"18ac36a5-1494-44b5-9a0a-cd275513992a\") " pod="openstack/kube-state-metrics-0" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.867307 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18ac36a5-1494-44b5-9a0a-cd275513992a-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"18ac36a5-1494-44b5-9a0a-cd275513992a\") " pod="openstack/kube-state-metrics-0" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.870925 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/18ac36a5-1494-44b5-9a0a-cd275513992a-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"18ac36a5-1494-44b5-9a0a-cd275513992a\") " pod="openstack/kube-state-metrics-0" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.885924 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/18ac36a5-1494-44b5-9a0a-cd275513992a-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"18ac36a5-1494-44b5-9a0a-cd275513992a\") " pod="openstack/kube-state-metrics-0" Jan 30 12:23:45 crc kubenswrapper[4703]: I0130 12:23:45.906934 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tstkx\" (UniqueName: \"kubernetes.io/projected/18ac36a5-1494-44b5-9a0a-cd275513992a-kube-api-access-tstkx\") pod \"kube-state-metrics-0\" (UID: \"18ac36a5-1494-44b5-9a0a-cd275513992a\") " pod="openstack/kube-state-metrics-0" Jan 30 12:23:46 crc kubenswrapper[4703]: I0130 12:23:46.190194 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 30 12:23:46 crc kubenswrapper[4703]: I0130 12:23:46.490525 4703 generic.go:334] "Generic (PLEG): container finished" podID="04da2786-8d07-4399-9200-95c1f596a97b" containerID="2e07257590556a5cac2a62522e144e89374ce50945d49e9e874c10ec043c1e8a" exitCode=1 Jan 30 12:23:46 crc kubenswrapper[4703]: I0130 12:23:46.490638 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"04da2786-8d07-4399-9200-95c1f596a97b","Type":"ContainerDied","Data":"2e07257590556a5cac2a62522e144e89374ce50945d49e9e874c10ec043c1e8a"} Jan 30 12:23:46 crc kubenswrapper[4703]: I0130 12:23:46.492336 4703 scope.go:117] "RemoveContainer" containerID="2e07257590556a5cac2a62522e144e89374ce50945d49e9e874c10ec043c1e8a" Jan 30 12:23:46 crc kubenswrapper[4703]: W0130 12:23:46.750682 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18ac36a5_1494_44b5_9a0a_cd275513992a.slice/crio-c73d1785915902a95049192fe43e2f00989d9d67e929d095c601d924a3e591e9 WatchSource:0}: Error finding container c73d1785915902a95049192fe43e2f00989d9d67e929d095c601d924a3e591e9: Status 404 returned error can't find the container with id c73d1785915902a95049192fe43e2f00989d9d67e929d095c601d924a3e591e9 Jan 30 12:23:46 crc kubenswrapper[4703]: I0130 12:23:46.754070 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 30 12:23:47 crc kubenswrapper[4703]: I0130 12:23:47.097903 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7847f607-512e-440a-af08-8fc3104621b6" path="/var/lib/kubelet/pods/7847f607-512e-440a-af08-8fc3104621b6/volumes" Jan 30 12:23:47 crc kubenswrapper[4703]: I0130 12:23:47.405441 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:23:47 crc kubenswrapper[4703]: I0130 12:23:47.406243 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerName="ceilometer-central-agent" containerID="cri-o://36966515af4078e2e076520f849000cfa7ae40e1a6cf9753411648a7299761b6" gracePeriod=30 Jan 30 12:23:47 crc kubenswrapper[4703]: I0130 12:23:47.407245 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerName="sg-core" containerID="cri-o://163c6fb9f14a050376cbbacf469f4c8d7803e870094ed88896666efa17df373f" gracePeriod=30 Jan 30 12:23:47 crc kubenswrapper[4703]: I0130 12:23:47.407299 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerName="proxy-httpd" containerID="cri-o://b8982760796eb97198d793f674a5e4cb4266ddbd5e94dfa6477a2fb2a86e62d0" gracePeriod=30 Jan 30 12:23:47 crc kubenswrapper[4703]: I0130 12:23:47.407326 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerName="ceilometer-notification-agent" containerID="cri-o://02dedaa78642f3180e90f003393dc0b541ff490fa8875c09df6abcb405e4459a" gracePeriod=30 Jan 30 12:23:47 crc kubenswrapper[4703]: I0130 12:23:47.508543 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" 
event={"ID":"18ac36a5-1494-44b5-9a0a-cd275513992a","Type":"ContainerStarted","Data":"19264ba0a50311bbb2a4115f69dec761f2db70dc75193053511c49fb70085a04"} Jan 30 12:23:47 crc kubenswrapper[4703]: I0130 12:23:47.508621 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"18ac36a5-1494-44b5-9a0a-cd275513992a","Type":"ContainerStarted","Data":"c73d1785915902a95049192fe43e2f00989d9d67e929d095c601d924a3e591e9"} Jan 30 12:23:47 crc kubenswrapper[4703]: I0130 12:23:47.508838 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 30 12:23:47 crc kubenswrapper[4703]: I0130 12:23:47.511966 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"04da2786-8d07-4399-9200-95c1f596a97b","Type":"ContainerStarted","Data":"bc692c50fb600574754b3e0a1136b2015ad236386acb6358e1bce88cb71e9b73"} Jan 30 12:23:47 crc kubenswrapper[4703]: I0130 12:23:47.550579 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.143324579 podStartE2EDuration="2.550551062s" podCreationTimestamp="2026-01-30 12:23:45 +0000 UTC" firstStartedPulling="2026-01-30 12:23:46.75454298 +0000 UTC m=+1662.532364634" lastFinishedPulling="2026-01-30 12:23:47.161769473 +0000 UTC m=+1662.939591117" observedRunningTime="2026-01-30 12:23:47.541042287 +0000 UTC m=+1663.318863941" watchObservedRunningTime="2026-01-30 12:23:47.550551062 +0000 UTC m=+1663.328372716" Jan 30 12:23:48 crc kubenswrapper[4703]: I0130 12:23:48.527675 4703 generic.go:334] "Generic (PLEG): container finished" podID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerID="b8982760796eb97198d793f674a5e4cb4266ddbd5e94dfa6477a2fb2a86e62d0" exitCode=0 Jan 30 12:23:48 crc kubenswrapper[4703]: I0130 12:23:48.527730 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef","Type":"ContainerDied","Data":"b8982760796eb97198d793f674a5e4cb4266ddbd5e94dfa6477a2fb2a86e62d0"} Jan 30 12:23:48 crc kubenswrapper[4703]: I0130 12:23:48.528268 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef","Type":"ContainerDied","Data":"163c6fb9f14a050376cbbacf469f4c8d7803e870094ed88896666efa17df373f"} Jan 30 12:23:48 crc kubenswrapper[4703]: I0130 12:23:48.528176 4703 generic.go:334] "Generic (PLEG): container finished" podID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerID="163c6fb9f14a050376cbbacf469f4c8d7803e870094ed88896666efa17df373f" exitCode=2 Jan 30 12:23:48 crc kubenswrapper[4703]: I0130 12:23:48.528310 4703 generic.go:334] "Generic (PLEG): container finished" podID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerID="36966515af4078e2e076520f849000cfa7ae40e1a6cf9753411648a7299761b6" exitCode=0 Jan 30 12:23:48 crc kubenswrapper[4703]: I0130 12:23:48.528694 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef","Type":"ContainerDied","Data":"36966515af4078e2e076520f849000cfa7ae40e1a6cf9753411648a7299761b6"} Jan 30 12:23:49 crc kubenswrapper[4703]: I0130 12:23:49.859545 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 30 12:23:49 crc kubenswrapper[4703]: I0130 12:23:49.948325 4703 scope.go:117] "RemoveContainer" 
containerID="8b75261f5087658d6065c79dbc80a36cab75a7635d5a2e56e959769b10d384e2" Jan 30 12:23:49 crc kubenswrapper[4703]: I0130 12:23:49.984555 4703 scope.go:117] "RemoveContainer" containerID="f22a6a175a2dc39c2b374372a89450bb61ba631f529ca883978751cf00e7dc4e" Jan 30 12:23:50 crc kubenswrapper[4703]: I0130 12:23:50.013184 4703 scope.go:117] "RemoveContainer" containerID="3aace889b773977420b0ed6985271772bde7953e878840124cf13614e3ec5f49" Jan 30 12:23:50 crc kubenswrapper[4703]: I0130 12:23:50.554008 4703 generic.go:334] "Generic (PLEG): container finished" podID="04da2786-8d07-4399-9200-95c1f596a97b" containerID="bc692c50fb600574754b3e0a1136b2015ad236386acb6358e1bce88cb71e9b73" exitCode=1 Jan 30 12:23:50 crc kubenswrapper[4703]: I0130 12:23:50.554073 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"04da2786-8d07-4399-9200-95c1f596a97b","Type":"ContainerDied","Data":"bc692c50fb600574754b3e0a1136b2015ad236386acb6358e1bce88cb71e9b73"} Jan 30 12:23:50 crc kubenswrapper[4703]: I0130 12:23:50.554571 4703 scope.go:117] "RemoveContainer" containerID="2e07257590556a5cac2a62522e144e89374ce50945d49e9e874c10ec043c1e8a" Jan 30 12:23:50 crc kubenswrapper[4703]: I0130 12:23:50.555485 4703 scope.go:117] "RemoveContainer" containerID="bc692c50fb600574754b3e0a1136b2015ad236386acb6358e1bce88cb71e9b73" Jan 30 12:23:50 crc kubenswrapper[4703]: E0130 12:23:50.556137 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 10s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(04da2786-8d07-4399-9200-95c1f596a97b)\"" pod="openstack/nova-scheduler-0" podUID="04da2786-8d07-4399-9200-95c1f596a97b" Jan 30 12:23:50 crc kubenswrapper[4703]: I0130 12:23:50.779315 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 30 12:23:50 crc kubenswrapper[4703]: I0130 12:23:50.779419 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:23:50 crc kubenswrapper[4703]: I0130 12:23:50.779471 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:23:50 crc kubenswrapper[4703]: I0130 12:23:50.779482 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.390402 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.513534 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mdhp7\" (UniqueName: \"kubernetes.io/projected/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-kube-api-access-mdhp7\") pod \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.513702 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-run-httpd\") pod \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.513842 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-scripts\") pod \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.514014 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-sg-core-conf-yaml\") pod \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.514154 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-config-data\") pod \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.514223 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-combined-ca-bundle\") pod \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.514233 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" (UID: "c6dbdaa3-2ccf-4e14-98b4-34be22a2edef"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.514324 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-log-httpd\") pod \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\" (UID: \"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef\") " Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.515037 4703 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.515535 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" (UID: "c6dbdaa3-2ccf-4e14-98b4-34be22a2edef"). 
InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.524430 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-scripts" (OuterVolumeSpecName: "scripts") pod "c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" (UID: "c6dbdaa3-2ccf-4e14-98b4-34be22a2edef"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.524627 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-kube-api-access-mdhp7" (OuterVolumeSpecName: "kube-api-access-mdhp7") pod "c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" (UID: "c6dbdaa3-2ccf-4e14-98b4-34be22a2edef"). InnerVolumeSpecName "kube-api-access-mdhp7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.556910 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" (UID: "c6dbdaa3-2ccf-4e14-98b4-34be22a2edef"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.573491 4703 scope.go:117] "RemoveContainer" containerID="bc692c50fb600574754b3e0a1136b2015ad236386acb6358e1bce88cb71e9b73" Jan 30 12:23:51 crc kubenswrapper[4703]: E0130 12:23:51.573964 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 10s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(04da2786-8d07-4399-9200-95c1f596a97b)\"" pod="openstack/nova-scheduler-0" podUID="04da2786-8d07-4399-9200-95c1f596a97b" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.581085 4703 generic.go:334] "Generic (PLEG): container finished" podID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerID="02dedaa78642f3180e90f003393dc0b541ff490fa8875c09df6abcb405e4459a" exitCode=0 Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.581170 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.581505 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef","Type":"ContainerDied","Data":"02dedaa78642f3180e90f003393dc0b541ff490fa8875c09df6abcb405e4459a"} Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.581735 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c6dbdaa3-2ccf-4e14-98b4-34be22a2edef","Type":"ContainerDied","Data":"e1e147ef7809d57d14a7fe8bec657869126318174307d825b49360370c642912"} Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.581853 4703 scope.go:117] "RemoveContainer" containerID="b8982760796eb97198d793f674a5e4cb4266ddbd5e94dfa6477a2fb2a86e62d0" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.617882 4703 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.617928 4703 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.617942 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mdhp7\" (UniqueName: \"kubernetes.io/projected/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-kube-api-access-mdhp7\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.617957 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.625743 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" (UID: "c6dbdaa3-2ccf-4e14-98b4-34be22a2edef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.659343 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-config-data" (OuterVolumeSpecName: "config-data") pod "c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" (UID: "c6dbdaa3-2ccf-4e14-98b4-34be22a2edef"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.719943 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.720466 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.726930 4703 scope.go:117] "RemoveContainer" containerID="163c6fb9f14a050376cbbacf469f4c8d7803e870094ed88896666efa17df373f" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.748825 4703 scope.go:117] "RemoveContainer" containerID="02dedaa78642f3180e90f003393dc0b541ff490fa8875c09df6abcb405e4459a" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.771380 4703 scope.go:117] "RemoveContainer" containerID="36966515af4078e2e076520f849000cfa7ae40e1a6cf9753411648a7299761b6" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.794761 4703 scope.go:117] "RemoveContainer" containerID="b8982760796eb97198d793f674a5e4cb4266ddbd5e94dfa6477a2fb2a86e62d0" Jan 30 12:23:51 crc kubenswrapper[4703]: E0130 12:23:51.795512 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8982760796eb97198d793f674a5e4cb4266ddbd5e94dfa6477a2fb2a86e62d0\": container with ID starting with b8982760796eb97198d793f674a5e4cb4266ddbd5e94dfa6477a2fb2a86e62d0 not found: ID does not exist" containerID="b8982760796eb97198d793f674a5e4cb4266ddbd5e94dfa6477a2fb2a86e62d0" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.795563 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8982760796eb97198d793f674a5e4cb4266ddbd5e94dfa6477a2fb2a86e62d0"} err="failed to get container status \"b8982760796eb97198d793f674a5e4cb4266ddbd5e94dfa6477a2fb2a86e62d0\": rpc error: code = NotFound desc = could not find container \"b8982760796eb97198d793f674a5e4cb4266ddbd5e94dfa6477a2fb2a86e62d0\": container with ID starting with b8982760796eb97198d793f674a5e4cb4266ddbd5e94dfa6477a2fb2a86e62d0 not found: ID does not exist" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.795594 4703 scope.go:117] "RemoveContainer" containerID="163c6fb9f14a050376cbbacf469f4c8d7803e870094ed88896666efa17df373f" Jan 30 12:23:51 crc kubenswrapper[4703]: E0130 12:23:51.796167 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"163c6fb9f14a050376cbbacf469f4c8d7803e870094ed88896666efa17df373f\": container with ID starting with 163c6fb9f14a050376cbbacf469f4c8d7803e870094ed88896666efa17df373f not found: ID does not exist" containerID="163c6fb9f14a050376cbbacf469f4c8d7803e870094ed88896666efa17df373f" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.796233 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"163c6fb9f14a050376cbbacf469f4c8d7803e870094ed88896666efa17df373f"} err="failed to get container status \"163c6fb9f14a050376cbbacf469f4c8d7803e870094ed88896666efa17df373f\": rpc error: code = NotFound desc = could not find container \"163c6fb9f14a050376cbbacf469f4c8d7803e870094ed88896666efa17df373f\": container with ID starting with 
163c6fb9f14a050376cbbacf469f4c8d7803e870094ed88896666efa17df373f not found: ID does not exist" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.796278 4703 scope.go:117] "RemoveContainer" containerID="02dedaa78642f3180e90f003393dc0b541ff490fa8875c09df6abcb405e4459a" Jan 30 12:23:51 crc kubenswrapper[4703]: E0130 12:23:51.796833 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02dedaa78642f3180e90f003393dc0b541ff490fa8875c09df6abcb405e4459a\": container with ID starting with 02dedaa78642f3180e90f003393dc0b541ff490fa8875c09df6abcb405e4459a not found: ID does not exist" containerID="02dedaa78642f3180e90f003393dc0b541ff490fa8875c09df6abcb405e4459a" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.796868 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02dedaa78642f3180e90f003393dc0b541ff490fa8875c09df6abcb405e4459a"} err="failed to get container status \"02dedaa78642f3180e90f003393dc0b541ff490fa8875c09df6abcb405e4459a\": rpc error: code = NotFound desc = could not find container \"02dedaa78642f3180e90f003393dc0b541ff490fa8875c09df6abcb405e4459a\": container with ID starting with 02dedaa78642f3180e90f003393dc0b541ff490fa8875c09df6abcb405e4459a not found: ID does not exist" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.796888 4703 scope.go:117] "RemoveContainer" containerID="36966515af4078e2e076520f849000cfa7ae40e1a6cf9753411648a7299761b6" Jan 30 12:23:51 crc kubenswrapper[4703]: E0130 12:23:51.797199 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36966515af4078e2e076520f849000cfa7ae40e1a6cf9753411648a7299761b6\": container with ID starting with 36966515af4078e2e076520f849000cfa7ae40e1a6cf9753411648a7299761b6 not found: ID does not exist" containerID="36966515af4078e2e076520f849000cfa7ae40e1a6cf9753411648a7299761b6" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.797222 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36966515af4078e2e076520f849000cfa7ae40e1a6cf9753411648a7299761b6"} err="failed to get container status \"36966515af4078e2e076520f849000cfa7ae40e1a6cf9753411648a7299761b6\": rpc error: code = NotFound desc = could not find container \"36966515af4078e2e076520f849000cfa7ae40e1a6cf9753411648a7299761b6\": container with ID starting with 36966515af4078e2e076520f849000cfa7ae40e1a6cf9753411648a7299761b6 not found: ID does not exist" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.918725 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.934754 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.962286 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:23:51 crc kubenswrapper[4703]: E0130 12:23:51.962957 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerName="sg-core" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.962988 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerName="sg-core" Jan 30 12:23:51 crc kubenswrapper[4703]: E0130 12:23:51.963029 4703 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerName="ceilometer-central-agent" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.963039 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerName="ceilometer-central-agent" Jan 30 12:23:51 crc kubenswrapper[4703]: E0130 12:23:51.963055 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerName="ceilometer-notification-agent" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.963064 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerName="ceilometer-notification-agent" Jan 30 12:23:51 crc kubenswrapper[4703]: E0130 12:23:51.963076 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerName="proxy-httpd" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.963084 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerName="proxy-httpd" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.963393 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerName="proxy-httpd" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.963442 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerName="sg-core" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.963466 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerName="ceilometer-central-agent" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.963485 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" containerName="ceilometer-notification-agent" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.966191 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.968890 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.970216 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.970339 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 30 12:23:51 crc kubenswrapper[4703]: I0130 12:23:51.976787 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.130112 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-config-data\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.130299 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.130428 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5334320c-225b-4cbe-b488-c0a9d27af74a-log-httpd\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.130512 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-scripts\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.130583 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5334320c-225b-4cbe-b488-c0a9d27af74a-run-httpd\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.130612 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.130683 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.130731 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-br9tx\" (UniqueName: 
\"kubernetes.io/projected/5334320c-225b-4cbe-b488-c0a9d27af74a-kube-api-access-br9tx\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.232756 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.233923 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5334320c-225b-4cbe-b488-c0a9d27af74a-log-httpd\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.234503 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5334320c-225b-4cbe-b488-c0a9d27af74a-log-httpd\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.235678 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-scripts\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.236587 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5334320c-225b-4cbe-b488-c0a9d27af74a-run-httpd\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.236640 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.236722 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.236846 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-br9tx\" (UniqueName: \"kubernetes.io/projected/5334320c-225b-4cbe-b488-c0a9d27af74a-kube-api-access-br9tx\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.236929 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-config-data\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.238720 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/5334320c-225b-4cbe-b488-c0a9d27af74a-run-httpd\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.238965 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.246153 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.247958 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-scripts\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.250394 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.257495 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-config-data\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.264345 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-br9tx\" (UniqueName: \"kubernetes.io/projected/5334320c-225b-4cbe-b488-c0a9d27af74a-kube-api-access-br9tx\") pod \"ceilometer-0\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " pod="openstack/ceilometer-0" Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.301039 4703 util.go:30] "No sandbox for pod can be found. 
Jan 30 12:23:52 crc kubenswrapper[4703]: I0130 12:23:52.815507 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 30 12:23:53 crc kubenswrapper[4703]: I0130 12:23:53.106095 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6dbdaa3-2ccf-4e14-98b4-34be22a2edef" path="/var/lib/kubelet/pods/c6dbdaa3-2ccf-4e14-98b4-34be22a2edef/volumes"
Jan 30 12:23:53 crc kubenswrapper[4703]: I0130 12:23:53.137661 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 30 12:23:53 crc kubenswrapper[4703]: I0130 12:23:53.137752 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 30 12:23:53 crc kubenswrapper[4703]: I0130 12:23:53.623221 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5334320c-225b-4cbe-b488-c0a9d27af74a","Type":"ContainerStarted","Data":"69db2c1b807f8c4f8d8d79d91c8b6b8f098aaf0f8977af277cbbdb3f203623b1"}
Jan 30 12:23:53 crc kubenswrapper[4703]: I0130 12:23:53.624215 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5334320c-225b-4cbe-b488-c0a9d27af74a","Type":"ContainerStarted","Data":"96eb8659686cbf15e6f797c4e028b01d9b92164064448490a7f5267cbcd6f55a"}
Jan 30 12:23:54 crc kubenswrapper[4703]: I0130 12:23:54.220590 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="0763380e-e32e-44b0-af83-61e43f0b5eeb" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.215:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 30 12:23:54 crc kubenswrapper[4703]: I0130 12:23:54.220600 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="0763380e-e32e-44b0-af83-61e43f0b5eeb" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.215:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 30 12:23:54 crc kubenswrapper[4703]: I0130 12:23:54.636733 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5334320c-225b-4cbe-b488-c0a9d27af74a","Type":"ContainerStarted","Data":"fdb261a08857adc57aecf865bcf3d50fc7a90a85bd4d8c1e9fc3530a43554664"}
Jan 30 12:23:55 crc kubenswrapper[4703]: I0130 12:23:55.664446 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5334320c-225b-4cbe-b488-c0a9d27af74a","Type":"ContainerStarted","Data":"c24e990f2786d3d6118707c4be0d49a7868269a4ffc8055b6a64223b7b144656"}
Jan 30 12:23:56 crc kubenswrapper[4703]: I0130 12:23:56.205675 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Jan 30 12:23:58 crc kubenswrapper[4703]: I0130 12:23:58.853691 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5334320c-225b-4cbe-b488-c0a9d27af74a","Type":"ContainerStarted","Data":"f990b2036d7a9ba04c5e37ed8108dde74561e08c81afcce07ab34e32c03eec13"}
Jan 30 12:23:58 crc kubenswrapper[4703]: I0130 12:23:58.854739 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 30 12:23:58 crc kubenswrapper[4703]: I0130 12:23:58.889419 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.516294668 podStartE2EDuration="7.889379687s" podCreationTimestamp="2026-01-30 12:23:51 +0000 UTC" firstStartedPulling="2026-01-30 12:23:52.819279912 +0000 UTC m=+1668.597101566" lastFinishedPulling="2026-01-30 12:23:57.192364931 +0000 UTC m=+1672.970186585" observedRunningTime="2026-01-30 12:23:58.884104455 +0000 UTC m=+1674.661926109" watchObservedRunningTime="2026-01-30 12:23:58.889379687 +0000 UTC m=+1674.667201341"
Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.654704 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.754426 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.805615 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hp5c\" (UniqueName: \"kubernetes.io/projected/227c7e91-67c4-46e3-b16d-ad10b3029e81-kube-api-access-5hp5c\") pod \"227c7e91-67c4-46e3-b16d-ad10b3029e81\" (UID: \"227c7e91-67c4-46e3-b16d-ad10b3029e81\") "
Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.805700 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/227c7e91-67c4-46e3-b16d-ad10b3029e81-logs\") pod \"227c7e91-67c4-46e3-b16d-ad10b3029e81\" (UID: \"227c7e91-67c4-46e3-b16d-ad10b3029e81\") "
Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.805864 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/227c7e91-67c4-46e3-b16d-ad10b3029e81-combined-ca-bundle\") pod \"227c7e91-67c4-46e3-b16d-ad10b3029e81\" (UID: \"227c7e91-67c4-46e3-b16d-ad10b3029e81\") "
Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.805921 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/227c7e91-67c4-46e3-b16d-ad10b3029e81-config-data\") pod \"227c7e91-67c4-46e3-b16d-ad10b3029e81\" (UID: \"227c7e91-67c4-46e3-b16d-ad10b3029e81\") "
Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.806169 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/227c7e91-67c4-46e3-b16d-ad10b3029e81-logs" (OuterVolumeSpecName: "logs") pod "227c7e91-67c4-46e3-b16d-ad10b3029e81" (UID: "227c7e91-67c4-46e3-b16d-ad10b3029e81"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.806592 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/227c7e91-67c4-46e3-b16d-ad10b3029e81-logs\") on node \"crc\" DevicePath \"\""
Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.814021 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/227c7e91-67c4-46e3-b16d-ad10b3029e81-kube-api-access-5hp5c" (OuterVolumeSpecName: "kube-api-access-5hp5c") pod "227c7e91-67c4-46e3-b16d-ad10b3029e81" (UID: "227c7e91-67c4-46e3-b16d-ad10b3029e81"). InnerVolumeSpecName "kube-api-access-5hp5c". PluginName "kubernetes.io/projected", VolumeGidValue ""
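The pod_startup_latency_tracker entry for ceilometer-0 above carries enough data to check its own arithmetic: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (12:23:58.889379687 − 12:23:51 = 7.889379687s), and podStartSLOduration appears to be that figure minus the image-pull window (12:23:57.192364931 − 12:23:52.819279912 = 4.373085019s), giving exactly the logged 3.516294668s. A small Go check using the timestamps verbatim (the layout string is Go's default time.Time format, which these values are printed in):

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
        parse := func(s string) time.Time {
            t, err := time.Parse(layout, s)
            if err != nil {
                panic(err)
            }
            return t
        }
        created := parse("2026-01-30 12:23:51 +0000 UTC")            // podCreationTimestamp
        firstPull := parse("2026-01-30 12:23:52.819279912 +0000 UTC") // firstStartedPulling
        lastPull := parse("2026-01-30 12:23:57.192364931 +0000 UTC")  // lastFinishedPulling
        running := parse("2026-01-30 12:23:58.889379687 +0000 UTC")   // watchObservedRunningTime

        e2e := running.Sub(created)          // 7.889379687s
        slo := e2e - lastPull.Sub(firstPull) // pull time excluded: 3.516294668s
        fmt.Println(e2e, slo)
    }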
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.842242 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/227c7e91-67c4-46e3-b16d-ad10b3029e81-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "227c7e91-67c4-46e3-b16d-ad10b3029e81" (UID: "227c7e91-67c4-46e3-b16d-ad10b3029e81"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.849378 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/227c7e91-67c4-46e3-b16d-ad10b3029e81-config-data" (OuterVolumeSpecName: "config-data") pod "227c7e91-67c4-46e3-b16d-ad10b3029e81" (UID: "227c7e91-67c4-46e3-b16d-ad10b3029e81"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.876325 4703 generic.go:334] "Generic (PLEG): container finished" podID="355a9e7b-78db-4e03-997d-acc6136442e4" containerID="731db6fb3fa3680e6132c5dedbf118730f4151cdc31bf496748e70392e0796ef" exitCode=137 Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.876474 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.876497 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"355a9e7b-78db-4e03-997d-acc6136442e4","Type":"ContainerDied","Data":"731db6fb3fa3680e6132c5dedbf118730f4151cdc31bf496748e70392e0796ef"} Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.876579 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"355a9e7b-78db-4e03-997d-acc6136442e4","Type":"ContainerDied","Data":"f6accd44d0334349422265782d60472161e632f1089b0c43543c13f72e2ffd2f"} Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.876608 4703 scope.go:117] "RemoveContainer" containerID="731db6fb3fa3680e6132c5dedbf118730f4151cdc31bf496748e70392e0796ef" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.881078 4703 generic.go:334] "Generic (PLEG): container finished" podID="227c7e91-67c4-46e3-b16d-ad10b3029e81" containerID="0977eb8443229ca360b311bce7bbf4d96207dc6d8821eea6cb4a4d6bdb4c7093" exitCode=137 Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.881593 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.881585 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"227c7e91-67c4-46e3-b16d-ad10b3029e81","Type":"ContainerDied","Data":"0977eb8443229ca360b311bce7bbf4d96207dc6d8821eea6cb4a4d6bdb4c7093"} Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.881780 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"227c7e91-67c4-46e3-b16d-ad10b3029e81","Type":"ContainerDied","Data":"7a8c38ece3f7cf7207d9008363e12fea8c0d714d28828642cfa3e50d145618eb"} Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.920282 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fnnz6\" (UniqueName: \"kubernetes.io/projected/355a9e7b-78db-4e03-997d-acc6136442e4-kube-api-access-fnnz6\") pod \"355a9e7b-78db-4e03-997d-acc6136442e4\" (UID: \"355a9e7b-78db-4e03-997d-acc6136442e4\") " Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.920361 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/355a9e7b-78db-4e03-997d-acc6136442e4-combined-ca-bundle\") pod \"355a9e7b-78db-4e03-997d-acc6136442e4\" (UID: \"355a9e7b-78db-4e03-997d-acc6136442e4\") " Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.920465 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/355a9e7b-78db-4e03-997d-acc6136442e4-config-data\") pod \"355a9e7b-78db-4e03-997d-acc6136442e4\" (UID: \"355a9e7b-78db-4e03-997d-acc6136442e4\") " Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.921050 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/227c7e91-67c4-46e3-b16d-ad10b3029e81-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.921092 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/227c7e91-67c4-46e3-b16d-ad10b3029e81-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.921107 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hp5c\" (UniqueName: \"kubernetes.io/projected/227c7e91-67c4-46e3-b16d-ad10b3029e81-kube-api-access-5hp5c\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.925645 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/355a9e7b-78db-4e03-997d-acc6136442e4-kube-api-access-fnnz6" (OuterVolumeSpecName: "kube-api-access-fnnz6") pod "355a9e7b-78db-4e03-997d-acc6136442e4" (UID: "355a9e7b-78db-4e03-997d-acc6136442e4"). InnerVolumeSpecName "kube-api-access-fnnz6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.929834 4703 scope.go:117] "RemoveContainer" containerID="731db6fb3fa3680e6132c5dedbf118730f4151cdc31bf496748e70392e0796ef" Jan 30 12:24:00 crc kubenswrapper[4703]: E0130 12:24:00.930591 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"731db6fb3fa3680e6132c5dedbf118730f4151cdc31bf496748e70392e0796ef\": container with ID starting with 731db6fb3fa3680e6132c5dedbf118730f4151cdc31bf496748e70392e0796ef not found: ID does not exist" containerID="731db6fb3fa3680e6132c5dedbf118730f4151cdc31bf496748e70392e0796ef" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.930630 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"731db6fb3fa3680e6132c5dedbf118730f4151cdc31bf496748e70392e0796ef"} err="failed to get container status \"731db6fb3fa3680e6132c5dedbf118730f4151cdc31bf496748e70392e0796ef\": rpc error: code = NotFound desc = could not find container \"731db6fb3fa3680e6132c5dedbf118730f4151cdc31bf496748e70392e0796ef\": container with ID starting with 731db6fb3fa3680e6132c5dedbf118730f4151cdc31bf496748e70392e0796ef not found: ID does not exist" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.930655 4703 scope.go:117] "RemoveContainer" containerID="0977eb8443229ca360b311bce7bbf4d96207dc6d8821eea6cb4a4d6bdb4c7093" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.947091 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.968763 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.978224 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/355a9e7b-78db-4e03-997d-acc6136442e4-config-data" (OuterVolumeSpecName: "config-data") pod "355a9e7b-78db-4e03-997d-acc6136442e4" (UID: "355a9e7b-78db-4e03-997d-acc6136442e4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.986891 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 30 12:24:00 crc kubenswrapper[4703]: E0130 12:24:00.987563 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="227c7e91-67c4-46e3-b16d-ad10b3029e81" containerName="nova-metadata-metadata" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.987594 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="227c7e91-67c4-46e3-b16d-ad10b3029e81" containerName="nova-metadata-metadata" Jan 30 12:24:00 crc kubenswrapper[4703]: E0130 12:24:00.987623 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="355a9e7b-78db-4e03-997d-acc6136442e4" containerName="nova-cell1-novncproxy-novncproxy" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.987632 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="355a9e7b-78db-4e03-997d-acc6136442e4" containerName="nova-cell1-novncproxy-novncproxy" Jan 30 12:24:00 crc kubenswrapper[4703]: E0130 12:24:00.987644 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="227c7e91-67c4-46e3-b16d-ad10b3029e81" containerName="nova-metadata-log" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.987652 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="227c7e91-67c4-46e3-b16d-ad10b3029e81" containerName="nova-metadata-log" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.987980 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="227c7e91-67c4-46e3-b16d-ad10b3029e81" containerName="nova-metadata-metadata" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.988008 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="227c7e91-67c4-46e3-b16d-ad10b3029e81" containerName="nova-metadata-log" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.988031 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="355a9e7b-78db-4e03-997d-acc6136442e4" containerName="nova-cell1-novncproxy-novncproxy" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.989609 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.994048 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/355a9e7b-78db-4e03-997d-acc6136442e4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "355a9e7b-78db-4e03-997d-acc6136442e4" (UID: "355a9e7b-78db-4e03-997d-acc6136442e4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.994617 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 30 12:24:00 crc kubenswrapper[4703]: I0130 12:24:00.995022 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.014514 4703 scope.go:117] "RemoveContainer" containerID="92850b060bbf8ffb73d47e793e6e821342ee89797c0fe93ddd7a38294997fc9c" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.014653 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.023088 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dc8f562-ab16-4028-8142-2fffd6f42a44-config-data\") pod \"nova-metadata-0\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") " pod="openstack/nova-metadata-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.023159 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2dc8f562-ab16-4028-8142-2fffd6f42a44-logs\") pod \"nova-metadata-0\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") " pod="openstack/nova-metadata-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.023495 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2dc8f562-ab16-4028-8142-2fffd6f42a44-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") " pod="openstack/nova-metadata-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.023765 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bm549\" (UniqueName: \"kubernetes.io/projected/2dc8f562-ab16-4028-8142-2fffd6f42a44-kube-api-access-bm549\") pod \"nova-metadata-0\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") " pod="openstack/nova-metadata-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.023838 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dc8f562-ab16-4028-8142-2fffd6f42a44-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") " pod="openstack/nova-metadata-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.024216 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/355a9e7b-78db-4e03-997d-acc6136442e4-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.024243 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fnnz6\" (UniqueName: \"kubernetes.io/projected/355a9e7b-78db-4e03-997d-acc6136442e4-kube-api-access-fnnz6\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.024259 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/355a9e7b-78db-4e03-997d-acc6136442e4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.040056 4703 scope.go:117] 
"RemoveContainer" containerID="0977eb8443229ca360b311bce7bbf4d96207dc6d8821eea6cb4a4d6bdb4c7093" Jan 30 12:24:01 crc kubenswrapper[4703]: E0130 12:24:01.040783 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0977eb8443229ca360b311bce7bbf4d96207dc6d8821eea6cb4a4d6bdb4c7093\": container with ID starting with 0977eb8443229ca360b311bce7bbf4d96207dc6d8821eea6cb4a4d6bdb4c7093 not found: ID does not exist" containerID="0977eb8443229ca360b311bce7bbf4d96207dc6d8821eea6cb4a4d6bdb4c7093" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.040840 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0977eb8443229ca360b311bce7bbf4d96207dc6d8821eea6cb4a4d6bdb4c7093"} err="failed to get container status \"0977eb8443229ca360b311bce7bbf4d96207dc6d8821eea6cb4a4d6bdb4c7093\": rpc error: code = NotFound desc = could not find container \"0977eb8443229ca360b311bce7bbf4d96207dc6d8821eea6cb4a4d6bdb4c7093\": container with ID starting with 0977eb8443229ca360b311bce7bbf4d96207dc6d8821eea6cb4a4d6bdb4c7093 not found: ID does not exist" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.040878 4703 scope.go:117] "RemoveContainer" containerID="92850b060bbf8ffb73d47e793e6e821342ee89797c0fe93ddd7a38294997fc9c" Jan 30 12:24:01 crc kubenswrapper[4703]: E0130 12:24:01.041458 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92850b060bbf8ffb73d47e793e6e821342ee89797c0fe93ddd7a38294997fc9c\": container with ID starting with 92850b060bbf8ffb73d47e793e6e821342ee89797c0fe93ddd7a38294997fc9c not found: ID does not exist" containerID="92850b060bbf8ffb73d47e793e6e821342ee89797c0fe93ddd7a38294997fc9c" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.041521 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92850b060bbf8ffb73d47e793e6e821342ee89797c0fe93ddd7a38294997fc9c"} err="failed to get container status \"92850b060bbf8ffb73d47e793e6e821342ee89797c0fe93ddd7a38294997fc9c\": rpc error: code = NotFound desc = could not find container \"92850b060bbf8ffb73d47e793e6e821342ee89797c0fe93ddd7a38294997fc9c\": container with ID starting with 92850b060bbf8ffb73d47e793e6e821342ee89797c0fe93ddd7a38294997fc9c not found: ID does not exist" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.101196 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="227c7e91-67c4-46e3-b16d-ad10b3029e81" path="/var/lib/kubelet/pods/227c7e91-67c4-46e3-b16d-ad10b3029e81/volumes" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.127085 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bm549\" (UniqueName: \"kubernetes.io/projected/2dc8f562-ab16-4028-8142-2fffd6f42a44-kube-api-access-bm549\") pod \"nova-metadata-0\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") " pod="openstack/nova-metadata-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.127176 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dc8f562-ab16-4028-8142-2fffd6f42a44-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") " pod="openstack/nova-metadata-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.127360 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/2dc8f562-ab16-4028-8142-2fffd6f42a44-config-data\") pod \"nova-metadata-0\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") " pod="openstack/nova-metadata-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.127395 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2dc8f562-ab16-4028-8142-2fffd6f42a44-logs\") pod \"nova-metadata-0\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") " pod="openstack/nova-metadata-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.127512 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2dc8f562-ab16-4028-8142-2fffd6f42a44-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") " pod="openstack/nova-metadata-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.128660 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2dc8f562-ab16-4028-8142-2fffd6f42a44-logs\") pod \"nova-metadata-0\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") " pod="openstack/nova-metadata-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.134710 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2dc8f562-ab16-4028-8142-2fffd6f42a44-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") " pod="openstack/nova-metadata-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.134749 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dc8f562-ab16-4028-8142-2fffd6f42a44-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") " pod="openstack/nova-metadata-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.136824 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dc8f562-ab16-4028-8142-2fffd6f42a44-config-data\") pod \"nova-metadata-0\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") " pod="openstack/nova-metadata-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.153416 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bm549\" (UniqueName: \"kubernetes.io/projected/2dc8f562-ab16-4028-8142-2fffd6f42a44-kube-api-access-bm549\") pod \"nova-metadata-0\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") " pod="openstack/nova-metadata-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.275671 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.293221 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.315274 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.317464 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.319593 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.324422 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.324751 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.329793 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.330943 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.333559 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b602062-70c1-4485-87b8-a4aedb97350d-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b602062-70c1-4485-87b8-a4aedb97350d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.333981 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b602062-70c1-4485-87b8-a4aedb97350d-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b602062-70c1-4485-87b8-a4aedb97350d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.334154 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b602062-70c1-4485-87b8-a4aedb97350d-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b602062-70c1-4485-87b8-a4aedb97350d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.334240 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b602062-70c1-4485-87b8-a4aedb97350d-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b602062-70c1-4485-87b8-a4aedb97350d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.334364 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4b497\" (UniqueName: \"kubernetes.io/projected/9b602062-70c1-4485-87b8-a4aedb97350d-kube-api-access-4b497\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b602062-70c1-4485-87b8-a4aedb97350d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.437006 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b602062-70c1-4485-87b8-a4aedb97350d-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b602062-70c1-4485-87b8-a4aedb97350d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.437431 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b602062-70c1-4485-87b8-a4aedb97350d-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b602062-70c1-4485-87b8-a4aedb97350d\") " 
pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.437918 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b602062-70c1-4485-87b8-a4aedb97350d-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b602062-70c1-4485-87b8-a4aedb97350d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.437965 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b602062-70c1-4485-87b8-a4aedb97350d-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b602062-70c1-4485-87b8-a4aedb97350d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.438105 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4b497\" (UniqueName: \"kubernetes.io/projected/9b602062-70c1-4485-87b8-a4aedb97350d-kube-api-access-4b497\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b602062-70c1-4485-87b8-a4aedb97350d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.445961 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b602062-70c1-4485-87b8-a4aedb97350d-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b602062-70c1-4485-87b8-a4aedb97350d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.452101 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b602062-70c1-4485-87b8-a4aedb97350d-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b602062-70c1-4485-87b8-a4aedb97350d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.455857 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b602062-70c1-4485-87b8-a4aedb97350d-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b602062-70c1-4485-87b8-a4aedb97350d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.455912 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/9b602062-70c1-4485-87b8-a4aedb97350d-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b602062-70c1-4485-87b8-a4aedb97350d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.461371 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4b497\" (UniqueName: \"kubernetes.io/projected/9b602062-70c1-4485-87b8-a4aedb97350d-kube-api-access-4b497\") pod \"nova-cell1-novncproxy-0\" (UID: \"9b602062-70c1-4485-87b8-a4aedb97350d\") " pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.580221 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.852217 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 30 12:24:01 crc kubenswrapper[4703]: I0130 12:24:01.898710 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2dc8f562-ab16-4028-8142-2fffd6f42a44","Type":"ContainerStarted","Data":"f1eb7206036f73e753b374d7145226b8e85003a630c1c02d6126b47f3f610a5c"} Jan 30 12:24:02 crc kubenswrapper[4703]: I0130 12:24:02.122301 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 30 12:24:02 crc kubenswrapper[4703]: W0130 12:24:02.137117 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9b602062_70c1_4485_87b8_a4aedb97350d.slice/crio-8c8bf4831eb9b136f17fbe36634ae4186b0706232f9a200955baa051a74be47b WatchSource:0}: Error finding container 8c8bf4831eb9b136f17fbe36634ae4186b0706232f9a200955baa051a74be47b: Status 404 returned error can't find the container with id 8c8bf4831eb9b136f17fbe36634ae4186b0706232f9a200955baa051a74be47b Jan 30 12:24:02 crc kubenswrapper[4703]: I0130 12:24:02.917366 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"9b602062-70c1-4485-87b8-a4aedb97350d","Type":"ContainerStarted","Data":"651725905bc1f761280a16d85eb941af8b9b57d20d5997b9b96192c89c0919c9"} Jan 30 12:24:02 crc kubenswrapper[4703]: I0130 12:24:02.917886 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"9b602062-70c1-4485-87b8-a4aedb97350d","Type":"ContainerStarted","Data":"8c8bf4831eb9b136f17fbe36634ae4186b0706232f9a200955baa051a74be47b"} Jan 30 12:24:02 crc kubenswrapper[4703]: I0130 12:24:02.919051 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2dc8f562-ab16-4028-8142-2fffd6f42a44","Type":"ContainerStarted","Data":"faf8f61aa0c3ef41ad8c69cdb0bafbf5c6b050feb8ead1b4586859fa23ba06f0"} Jan 30 12:24:02 crc kubenswrapper[4703]: I0130 12:24:02.919081 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2dc8f562-ab16-4028-8142-2fffd6f42a44","Type":"ContainerStarted","Data":"cd80b84024cdb7a627f658bae556313655a2cbc87f9966eed164ea09bb45da19"} Jan 30 12:24:02 crc kubenswrapper[4703]: I0130 12:24:02.948754 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=1.948720538 podStartE2EDuration="1.948720538s" podCreationTimestamp="2026-01-30 12:24:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:24:02.938044622 +0000 UTC m=+1678.715866286" watchObservedRunningTime="2026-01-30 12:24:02.948720538 +0000 UTC m=+1678.726542192" Jan 30 12:24:02 crc kubenswrapper[4703]: I0130 12:24:02.986972 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.986945853 podStartE2EDuration="2.986945853s" podCreationTimestamp="2026-01-30 12:24:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:24:02.972768553 +0000 UTC m=+1678.750590217" watchObservedRunningTime="2026-01-30 12:24:02.986945853 +0000 UTC 
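In the two startup-duration entries just above, firstStartedPulling and lastFinishedPulling are "0001-01-01 00:00:00 +0000 UTC", which is Go's zero time.Time: no image pull happened because the images were already cached on the node, which is presumably why podStartSLOduration and podStartE2EDuration coincide (both 1.948720538s for nova-cell1-novncproxy-0). A two-minute Go illustration of that zero-value convention:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        var never time.Time // zero value; formats as 0001-01-01 00:00:00 +0000 UTC
        fmt.Println(never, never.IsZero())

        // With no pull interval to subtract, SLO == E2E, as in the
        // nova-cell1-novncproxy-0 entry above (both 1.948720538s).
        e2e := 1948720538 * time.Nanosecond
        fmt.Println(e2e - never.Sub(never)) // 1.948720538s
    }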
Jan 30 12:24:03 crc kubenswrapper[4703]: I0130 12:24:03.105036 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="355a9e7b-78db-4e03-997d-acc6136442e4" path="/var/lib/kubelet/pods/355a9e7b-78db-4e03-997d-acc6136442e4/volumes"
Jan 30 12:24:03 crc kubenswrapper[4703]: I0130 12:24:03.143264 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Jan 30 12:24:03 crc kubenswrapper[4703]: I0130 12:24:03.144974 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Jan 30 12:24:03 crc kubenswrapper[4703]: I0130 12:24:03.145584 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Jan 30 12:24:03 crc kubenswrapper[4703]: I0130 12:24:03.149176 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Jan 30 12:24:03 crc kubenswrapper[4703]: I0130 12:24:03.932861 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Jan 30 12:24:03 crc kubenswrapper[4703]: I0130 12:24:03.938082 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.170888 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-nb8zq"]
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.187250 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.236907 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-nb8zq"]
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.238931 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-nb8zq\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.238992 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kz2nl\" (UniqueName: \"kubernetes.io/projected/dbda6f00-1625-4d38-9932-de68c1af9bfe-kube-api-access-kz2nl\") pod \"dnsmasq-dns-89c5cd4d5-nb8zq\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.262316 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-config\") pod \"dnsmasq-dns-89c5cd4d5-nb8zq\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.262635 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-nb8zq\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.262723 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-nb8zq\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.262849 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-nb8zq\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.365429 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-nb8zq\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.365515 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-nb8zq\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.365574 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-nb8zq\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.365654 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-nb8zq\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.365697 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kz2nl\" (UniqueName: \"kubernetes.io/projected/dbda6f00-1625-4d38-9932-de68c1af9bfe-kube-api-access-kz2nl\") pod \"dnsmasq-dns-89c5cd4d5-nb8zq\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.365797 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-config\") pod \"dnsmasq-dns-89c5cd4d5-nb8zq\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.366596 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-ovsdbserver-sb\") pod \"dnsmasq-dns-89c5cd4d5-nb8zq\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.367079 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-dns-swift-storage-0\") pod \"dnsmasq-dns-89c5cd4d5-nb8zq\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.367424 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-ovsdbserver-nb\") pod \"dnsmasq-dns-89c5cd4d5-nb8zq\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.367458 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-dns-svc\") pod \"dnsmasq-dns-89c5cd4d5-nb8zq\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.367486 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-config\") pod \"dnsmasq-dns-89c5cd4d5-nb8zq\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.391077 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kz2nl\" (UniqueName: \"kubernetes.io/projected/dbda6f00-1625-4d38-9932-de68c1af9bfe-kube-api-access-kz2nl\") pod \"dnsmasq-dns-89c5cd4d5-nb8zq\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:04 crc kubenswrapper[4703]: I0130 12:24:04.552662 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:05 crc kubenswrapper[4703]: I0130 12:24:05.123419 4703 scope.go:117] "RemoveContainer" containerID="bc692c50fb600574754b3e0a1136b2015ad236386acb6358e1bce88cb71e9b73"
Jan 30 12:24:05 crc kubenswrapper[4703]: I0130 12:24:05.305285 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-nb8zq"]
Jan 30 12:24:05 crc kubenswrapper[4703]: I0130 12:24:05.973604 4703 generic.go:334] "Generic (PLEG): container finished" podID="dbda6f00-1625-4d38-9932-de68c1af9bfe" containerID="113d15d28afeb2077495878e5cb34ba57dd4e6d8d26fe1e8afd018b5b74ac25f" exitCode=0
Jan 30 12:24:05 crc kubenswrapper[4703]: I0130 12:24:05.973848 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq" event={"ID":"dbda6f00-1625-4d38-9932-de68c1af9bfe","Type":"ContainerDied","Data":"113d15d28afeb2077495878e5cb34ba57dd4e6d8d26fe1e8afd018b5b74ac25f"}
Jan 30 12:24:05 crc kubenswrapper[4703]: I0130 12:24:05.974366 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq" event={"ID":"dbda6f00-1625-4d38-9932-de68c1af9bfe","Type":"ContainerStarted","Data":"a91db1eccd55fee8ccf9083febf4ccec59adb1bf42612ea1674439fce626965a"}
Jan 30 12:24:05 crc kubenswrapper[4703]: I0130 12:24:05.977232 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"04da2786-8d07-4399-9200-95c1f596a97b","Type":"ContainerStarted","Data":"e79d487b705f01d50335082c477a60d434f7bbfe2992cd7404cc8c776028602e"}
Jan 30 12:24:06 crc kubenswrapper[4703]: I0130 12:24:06.321354 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 30 12:24:06 crc kubenswrapper[4703]: I0130 12:24:06.321801 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 30 12:24:06 crc kubenswrapper[4703]: I0130 12:24:06.580852 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Jan 30 12:24:07 crc kubenswrapper[4703]: I0130 12:24:07.016259 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq" event={"ID":"dbda6f00-1625-4d38-9932-de68c1af9bfe","Type":"ContainerStarted","Data":"3805a13646b6ee14a03c75ce077d134e480717508c8322e4c44d0725a6da73a7"}
Jan 30 12:24:07 crc kubenswrapper[4703]: I0130 12:24:07.016512 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq"
Jan 30 12:24:07 crc kubenswrapper[4703]: I0130 12:24:07.059098 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 30 12:24:07 crc kubenswrapper[4703]: I0130 12:24:07.059368 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="0763380e-e32e-44b0-af83-61e43f0b5eeb" containerName="nova-api-log" containerID="cri-o://eab36726edd9a9c79d9f4785e026c0dd0d0cb555bab1949db44ffe7cc6b5b302" gracePeriod=30
Jan 30 12:24:07 crc kubenswrapper[4703]: I0130 12:24:07.059866 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="0763380e-e32e-44b0-af83-61e43f0b5eeb" containerName="nova-api-api" containerID="cri-o://b3aaab44e4e2c7a1b909855bc0a361d54ac0cddbfaabcd07b5ad7dc49bb16a01" gracePeriod=30
Jan 30 12:24:07 crc kubenswrapper[4703]: I0130 12:24:07.071901 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq" podStartSLOduration=3.071877045 podStartE2EDuration="3.071877045s" podCreationTimestamp="2026-01-30 12:24:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:24:07.043363297 +0000 UTC m=+1682.821184951" watchObservedRunningTime="2026-01-30 12:24:07.071877045 +0000 UTC m=+1682.849698689"
Jan 30 12:24:07 crc kubenswrapper[4703]: I0130 12:24:07.270616 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 30 12:24:07 crc kubenswrapper[4703]: I0130 12:24:07.271011 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerName="ceilometer-central-agent" containerID="cri-o://69db2c1b807f8c4f8d8d79d91c8b6b8f098aaf0f8977af277cbbdb3f203623b1" gracePeriod=30
Jan 30 12:24:07 crc kubenswrapper[4703]: I0130 12:24:07.271199 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerName="ceilometer-notification-agent" containerID="cri-o://fdb261a08857adc57aecf865bcf3d50fc7a90a85bd4d8c1e9fc3530a43554664" gracePeriod=30
Jan 30 12:24:07 crc kubenswrapper[4703]: I0130 12:24:07.271274 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerName="proxy-httpd" containerID="cri-o://f990b2036d7a9ba04c5e37ed8108dde74561e08c81afcce07ab34e32c03eec13" gracePeriod=30
Jan 30 12:24:07 crc kubenswrapper[4703]: I0130 12:24:07.271178 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerName="sg-core" containerID="cri-o://c24e990f2786d3d6118707c4be0d49a7868269a4ffc8055b6a64223b7b144656" gracePeriod=30
Jan 30 12:24:07 crc kubenswrapper[4703]: I0130 12:24:07.282562 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.217:3000/\": EOF"
Jan 30 12:24:08 crc kubenswrapper[4703]: I0130 12:24:08.033487 4703 generic.go:334] "Generic (PLEG): container finished" podID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerID="f990b2036d7a9ba04c5e37ed8108dde74561e08c81afcce07ab34e32c03eec13" exitCode=0
Jan 30 12:24:08 crc kubenswrapper[4703]: I0130 12:24:08.033812 4703 generic.go:334] "Generic (PLEG): container finished" podID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerID="c24e990f2786d3d6118707c4be0d49a7868269a4ffc8055b6a64223b7b144656" exitCode=2
Jan 30 12:24:08 crc kubenswrapper[4703]: I0130 12:24:08.033826 4703 generic.go:334] "Generic (PLEG): container finished" podID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerID="69db2c1b807f8c4f8d8d79d91c8b6b8f098aaf0f8977af277cbbdb3f203623b1" exitCode=0
Jan 30 12:24:08 crc kubenswrapper[4703]: I0130 12:24:08.033676 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5334320c-225b-4cbe-b488-c0a9d27af74a","Type":"ContainerDied","Data":"f990b2036d7a9ba04c5e37ed8108dde74561e08c81afcce07ab34e32c03eec13"}
Jan 30 12:24:08 crc kubenswrapper[4703]: I0130 12:24:08.033904 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5334320c-225b-4cbe-b488-c0a9d27af74a","Type":"ContainerDied","Data":"c24e990f2786d3d6118707c4be0d49a7868269a4ffc8055b6a64223b7b144656"}
event={"ID":"5334320c-225b-4cbe-b488-c0a9d27af74a","Type":"ContainerDied","Data":"c24e990f2786d3d6118707c4be0d49a7868269a4ffc8055b6a64223b7b144656"} Jan 30 12:24:08 crc kubenswrapper[4703]: I0130 12:24:08.033922 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5334320c-225b-4cbe-b488-c0a9d27af74a","Type":"ContainerDied","Data":"69db2c1b807f8c4f8d8d79d91c8b6b8f098aaf0f8977af277cbbdb3f203623b1"} Jan 30 12:24:08 crc kubenswrapper[4703]: I0130 12:24:08.036757 4703 generic.go:334] "Generic (PLEG): container finished" podID="0763380e-e32e-44b0-af83-61e43f0b5eeb" containerID="eab36726edd9a9c79d9f4785e026c0dd0d0cb555bab1949db44ffe7cc6b5b302" exitCode=143 Jan 30 12:24:08 crc kubenswrapper[4703]: I0130 12:24:08.038061 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0763380e-e32e-44b0-af83-61e43f0b5eeb","Type":"ContainerDied","Data":"eab36726edd9a9c79d9f4785e026c0dd0d0cb555bab1949db44ffe7cc6b5b302"} Jan 30 12:24:10 crc kubenswrapper[4703]: I0130 12:24:10.074233 4703 generic.go:334] "Generic (PLEG): container finished" podID="04da2786-8d07-4399-9200-95c1f596a97b" containerID="e79d487b705f01d50335082c477a60d434f7bbfe2992cd7404cc8c776028602e" exitCode=1 Jan 30 12:24:10 crc kubenswrapper[4703]: I0130 12:24:10.074393 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"04da2786-8d07-4399-9200-95c1f596a97b","Type":"ContainerDied","Data":"e79d487b705f01d50335082c477a60d434f7bbfe2992cd7404cc8c776028602e"} Jan 30 12:24:10 crc kubenswrapper[4703]: I0130 12:24:10.074596 4703 scope.go:117] "RemoveContainer" containerID="bc692c50fb600574754b3e0a1136b2015ad236386acb6358e1bce88cb71e9b73" Jan 30 12:24:10 crc kubenswrapper[4703]: I0130 12:24:10.075900 4703 scope.go:117] "RemoveContainer" containerID="e79d487b705f01d50335082c477a60d434f7bbfe2992cd7404cc8c776028602e" Jan 30 12:24:10 crc kubenswrapper[4703]: E0130 12:24:10.076377 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 20s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(04da2786-8d07-4399-9200-95c1f596a97b)\"" pod="openstack/nova-scheduler-0" podUID="04da2786-8d07-4399-9200-95c1f596a97b" Jan 30 12:24:10 crc kubenswrapper[4703]: I0130 12:24:10.778802 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:24:10 crc kubenswrapper[4703]: I0130 12:24:10.779344 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:24:10 crc kubenswrapper[4703]: I0130 12:24:10.779358 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 30 12:24:10 crc kubenswrapper[4703]: I0130 12:24:10.779404 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:24:10 crc kubenswrapper[4703]: I0130 12:24:10.959581 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 30 12:24:10 crc kubenswrapper[4703]: I0130 12:24:10.987594 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0763380e-e32e-44b0-af83-61e43f0b5eeb-combined-ca-bundle\") pod \"0763380e-e32e-44b0-af83-61e43f0b5eeb\" (UID: \"0763380e-e32e-44b0-af83-61e43f0b5eeb\") " Jan 30 12:24:10 crc kubenswrapper[4703]: I0130 12:24:10.987894 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-td7m9\" (UniqueName: \"kubernetes.io/projected/0763380e-e32e-44b0-af83-61e43f0b5eeb-kube-api-access-td7m9\") pod \"0763380e-e32e-44b0-af83-61e43f0b5eeb\" (UID: \"0763380e-e32e-44b0-af83-61e43f0b5eeb\") " Jan 30 12:24:10 crc kubenswrapper[4703]: I0130 12:24:10.988531 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0763380e-e32e-44b0-af83-61e43f0b5eeb-logs\") pod \"0763380e-e32e-44b0-af83-61e43f0b5eeb\" (UID: \"0763380e-e32e-44b0-af83-61e43f0b5eeb\") " Jan 30 12:24:10 crc kubenswrapper[4703]: I0130 12:24:10.988774 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0763380e-e32e-44b0-af83-61e43f0b5eeb-config-data\") pod \"0763380e-e32e-44b0-af83-61e43f0b5eeb\" (UID: \"0763380e-e32e-44b0-af83-61e43f0b5eeb\") " Jan 30 12:24:10 crc kubenswrapper[4703]: I0130 12:24:10.991197 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0763380e-e32e-44b0-af83-61e43f0b5eeb-logs" (OuterVolumeSpecName: "logs") pod "0763380e-e32e-44b0-af83-61e43f0b5eeb" (UID: "0763380e-e32e-44b0-af83-61e43f0b5eeb"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.009454 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0763380e-e32e-44b0-af83-61e43f0b5eeb-kube-api-access-td7m9" (OuterVolumeSpecName: "kube-api-access-td7m9") pod "0763380e-e32e-44b0-af83-61e43f0b5eeb" (UID: "0763380e-e32e-44b0-af83-61e43f0b5eeb"). InnerVolumeSpecName "kube-api-access-td7m9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.068916 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0763380e-e32e-44b0-af83-61e43f0b5eeb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0763380e-e32e-44b0-af83-61e43f0b5eeb" (UID: "0763380e-e32e-44b0-af83-61e43f0b5eeb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.096812 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-td7m9\" (UniqueName: \"kubernetes.io/projected/0763380e-e32e-44b0-af83-61e43f0b5eeb-kube-api-access-td7m9\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.096854 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0763380e-e32e-44b0-af83-61e43f0b5eeb-logs\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.096864 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0763380e-e32e-44b0-af83-61e43f0b5eeb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.122325 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0763380e-e32e-44b0-af83-61e43f0b5eeb-config-data" (OuterVolumeSpecName: "config-data") pod "0763380e-e32e-44b0-af83-61e43f0b5eeb" (UID: "0763380e-e32e-44b0-af83-61e43f0b5eeb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.130207 4703 generic.go:334] "Generic (PLEG): container finished" podID="0763380e-e32e-44b0-af83-61e43f0b5eeb" containerID="b3aaab44e4e2c7a1b909855bc0a361d54ac0cddbfaabcd07b5ad7dc49bb16a01" exitCode=0 Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.130602 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.134768 4703 generic.go:334] "Generic (PLEG): container finished" podID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerID="fdb261a08857adc57aecf865bcf3d50fc7a90a85bd4d8c1e9fc3530a43554664" exitCode=0 Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.140516 4703 scope.go:117] "RemoveContainer" containerID="e79d487b705f01d50335082c477a60d434f7bbfe2992cd7404cc8c776028602e" Jan 30 12:24:11 crc kubenswrapper[4703]: E0130 12:24:11.140914 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 20s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(04da2786-8d07-4399-9200-95c1f596a97b)\"" pod="openstack/nova-scheduler-0" podUID="04da2786-8d07-4399-9200-95c1f596a97b" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.219582 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0763380e-e32e-44b0-af83-61e43f0b5eeb-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.268387 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0763380e-e32e-44b0-af83-61e43f0b5eeb","Type":"ContainerDied","Data":"b3aaab44e4e2c7a1b909855bc0a361d54ac0cddbfaabcd07b5ad7dc49bb16a01"} Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.268528 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0763380e-e32e-44b0-af83-61e43f0b5eeb","Type":"ContainerDied","Data":"91fd62b45ad39c550162b9bb41534f147eebe30ccf8091f10445dfd79614947f"} Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.268591 4703 scope.go:117] "RemoveContainer" 
containerID="b3aaab44e4e2c7a1b909855bc0a361d54ac0cddbfaabcd07b5ad7dc49bb16a01" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.268790 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5334320c-225b-4cbe-b488-c0a9d27af74a","Type":"ContainerDied","Data":"fdb261a08857adc57aecf865bcf3d50fc7a90a85bd4d8c1e9fc3530a43554664"} Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.283971 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.306281 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.309197 4703 scope.go:117] "RemoveContainer" containerID="eab36726edd9a9c79d9f4785e026c0dd0d0cb555bab1949db44ffe7cc6b5b302" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.326457 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.326597 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5334320c-225b-4cbe-b488-c0a9d27af74a-run-httpd\") pod \"5334320c-225b-4cbe-b488-c0a9d27af74a\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.326681 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5334320c-225b-4cbe-b488-c0a9d27af74a-log-httpd\") pod \"5334320c-225b-4cbe-b488-c0a9d27af74a\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.326729 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-br9tx\" (UniqueName: \"kubernetes.io/projected/5334320c-225b-4cbe-b488-c0a9d27af74a-kube-api-access-br9tx\") pod \"5334320c-225b-4cbe-b488-c0a9d27af74a\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.326877 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-config-data\") pod \"5334320c-225b-4cbe-b488-c0a9d27af74a\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.326940 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-sg-core-conf-yaml\") pod \"5334320c-225b-4cbe-b488-c0a9d27af74a\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.327039 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-scripts\") pod \"5334320c-225b-4cbe-b488-c0a9d27af74a\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.327062 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-ceilometer-tls-certs\") pod \"5334320c-225b-4cbe-b488-c0a9d27af74a\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 
12:24:11.327089 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-combined-ca-bundle\") pod \"5334320c-225b-4cbe-b488-c0a9d27af74a\" (UID: \"5334320c-225b-4cbe-b488-c0a9d27af74a\") " Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.330141 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.339222 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.340185 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5334320c-225b-4cbe-b488-c0a9d27af74a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5334320c-225b-4cbe-b488-c0a9d27af74a" (UID: "5334320c-225b-4cbe-b488-c0a9d27af74a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.340374 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5334320c-225b-4cbe-b488-c0a9d27af74a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5334320c-225b-4cbe-b488-c0a9d27af74a" (UID: "5334320c-225b-4cbe-b488-c0a9d27af74a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.344396 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-scripts" (OuterVolumeSpecName: "scripts") pod "5334320c-225b-4cbe-b488-c0a9d27af74a" (UID: "5334320c-225b-4cbe-b488-c0a9d27af74a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.362192 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.362416 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5334320c-225b-4cbe-b488-c0a9d27af74a-kube-api-access-br9tx" (OuterVolumeSpecName: "kube-api-access-br9tx") pod "5334320c-225b-4cbe-b488-c0a9d27af74a" (UID: "5334320c-225b-4cbe-b488-c0a9d27af74a"). InnerVolumeSpecName "kube-api-access-br9tx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:24:11 crc kubenswrapper[4703]: E0130 12:24:11.362792 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0763380e-e32e-44b0-af83-61e43f0b5eeb" containerName="nova-api-log" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.362810 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="0763380e-e32e-44b0-af83-61e43f0b5eeb" containerName="nova-api-log" Jan 30 12:24:11 crc kubenswrapper[4703]: E0130 12:24:11.362822 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0763380e-e32e-44b0-af83-61e43f0b5eeb" containerName="nova-api-api" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.362829 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="0763380e-e32e-44b0-af83-61e43f0b5eeb" containerName="nova-api-api" Jan 30 12:24:11 crc kubenswrapper[4703]: E0130 12:24:11.362852 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerName="proxy-httpd" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.362858 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerName="proxy-httpd" Jan 30 12:24:11 crc kubenswrapper[4703]: E0130 12:24:11.362870 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerName="ceilometer-notification-agent" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.362876 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerName="ceilometer-notification-agent" Jan 30 12:24:11 crc kubenswrapper[4703]: E0130 12:24:11.362897 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerName="sg-core" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.362902 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerName="sg-core" Jan 30 12:24:11 crc kubenswrapper[4703]: E0130 12:24:11.362916 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerName="ceilometer-central-agent" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.362922 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerName="ceilometer-central-agent" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.363139 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="0763380e-e32e-44b0-af83-61e43f0b5eeb" containerName="nova-api-log" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.363150 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerName="ceilometer-notification-agent" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.363158 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerName="sg-core" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.363166 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerName="ceilometer-central-agent" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.363177 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="0763380e-e32e-44b0-af83-61e43f0b5eeb" containerName="nova-api-api" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.363196 4703 
memory_manager.go:354] "RemoveStaleState removing state" podUID="5334320c-225b-4cbe-b488-c0a9d27af74a" containerName="proxy-httpd" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.364404 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.378707 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.378950 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.379630 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.397133 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5334320c-225b-4cbe-b488-c0a9d27af74a" (UID: "5334320c-225b-4cbe-b488-c0a9d27af74a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.397467 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.412066 4703 scope.go:117] "RemoveContainer" containerID="b3aaab44e4e2c7a1b909855bc0a361d54ac0cddbfaabcd07b5ad7dc49bb16a01" Jan 30 12:24:11 crc kubenswrapper[4703]: E0130 12:24:11.412763 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3aaab44e4e2c7a1b909855bc0a361d54ac0cddbfaabcd07b5ad7dc49bb16a01\": container with ID starting with b3aaab44e4e2c7a1b909855bc0a361d54ac0cddbfaabcd07b5ad7dc49bb16a01 not found: ID does not exist" containerID="b3aaab44e4e2c7a1b909855bc0a361d54ac0cddbfaabcd07b5ad7dc49bb16a01" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.412800 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3aaab44e4e2c7a1b909855bc0a361d54ac0cddbfaabcd07b5ad7dc49bb16a01"} err="failed to get container status \"b3aaab44e4e2c7a1b909855bc0a361d54ac0cddbfaabcd07b5ad7dc49bb16a01\": rpc error: code = NotFound desc = could not find container \"b3aaab44e4e2c7a1b909855bc0a361d54ac0cddbfaabcd07b5ad7dc49bb16a01\": container with ID starting with b3aaab44e4e2c7a1b909855bc0a361d54ac0cddbfaabcd07b5ad7dc49bb16a01 not found: ID does not exist" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.412825 4703 scope.go:117] "RemoveContainer" containerID="eab36726edd9a9c79d9f4785e026c0dd0d0cb555bab1949db44ffe7cc6b5b302" Jan 30 12:24:11 crc kubenswrapper[4703]: E0130 12:24:11.413278 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eab36726edd9a9c79d9f4785e026c0dd0d0cb555bab1949db44ffe7cc6b5b302\": container with ID starting with eab36726edd9a9c79d9f4785e026c0dd0d0cb555bab1949db44ffe7cc6b5b302 not found: ID does not exist" containerID="eab36726edd9a9c79d9f4785e026c0dd0d0cb555bab1949db44ffe7cc6b5b302" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.413358 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eab36726edd9a9c79d9f4785e026c0dd0d0cb555bab1949db44ffe7cc6b5b302"} err="failed to get container status 
\"eab36726edd9a9c79d9f4785e026c0dd0d0cb555bab1949db44ffe7cc6b5b302\": rpc error: code = NotFound desc = could not find container \"eab36726edd9a9c79d9f4785e026c0dd0d0cb555bab1949db44ffe7cc6b5b302\": container with ID starting with eab36726edd9a9c79d9f4785e026c0dd0d0cb555bab1949db44ffe7cc6b5b302 not found: ID does not exist" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.431310 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j69zz\" (UniqueName: \"kubernetes.io/projected/42b1e803-4494-486f-960f-df4f64759fbf-kube-api-access-j69zz\") pod \"nova-api-0\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.431415 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.431516 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42b1e803-4494-486f-960f-df4f64759fbf-logs\") pod \"nova-api-0\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.431562 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-public-tls-certs\") pod \"nova-api-0\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.431596 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-internal-tls-certs\") pod \"nova-api-0\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.431628 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-config-data\") pod \"nova-api-0\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.431721 4703 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5334320c-225b-4cbe-b488-c0a9d27af74a-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.431737 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-br9tx\" (UniqueName: \"kubernetes.io/projected/5334320c-225b-4cbe-b488-c0a9d27af74a-kube-api-access-br9tx\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.431751 4703 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.431764 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.431775 4703 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5334320c-225b-4cbe-b488-c0a9d27af74a-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.456790 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "5334320c-225b-4cbe-b488-c0a9d27af74a" (UID: "5334320c-225b-4cbe-b488-c0a9d27af74a"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.493478 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5334320c-225b-4cbe-b488-c0a9d27af74a" (UID: "5334320c-225b-4cbe-b488-c0a9d27af74a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.533884 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42b1e803-4494-486f-960f-df4f64759fbf-logs\") pod \"nova-api-0\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.533969 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-public-tls-certs\") pod \"nova-api-0\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.534008 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-internal-tls-certs\") pod \"nova-api-0\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.534037 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-config-data\") pod \"nova-api-0\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.534227 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j69zz\" (UniqueName: \"kubernetes.io/projected/42b1e803-4494-486f-960f-df4f64759fbf-kube-api-access-j69zz\") pod \"nova-api-0\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.534269 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.534376 4703 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.534389 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.535482 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42b1e803-4494-486f-960f-df4f64759fbf-logs\") pod \"nova-api-0\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.540252 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.546522 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-internal-tls-certs\") pod \"nova-api-0\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.546891 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-public-tls-certs\") pod \"nova-api-0\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.550897 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-config-data\") pod \"nova-api-0\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.556148 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j69zz\" (UniqueName: \"kubernetes.io/projected/42b1e803-4494-486f-960f-df4f64759fbf-kube-api-access-j69zz\") pod \"nova-api-0\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " pod="openstack/nova-api-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.557217 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-config-data" (OuterVolumeSpecName: "config-data") pod "5334320c-225b-4cbe-b488-c0a9d27af74a" (UID: "5334320c-225b-4cbe-b488-c0a9d27af74a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.581221 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.619791 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.636575 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5334320c-225b-4cbe-b488-c0a9d27af74a-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:11 crc kubenswrapper[4703]: I0130 12:24:11.792483 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.157544 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5334320c-225b-4cbe-b488-c0a9d27af74a","Type":"ContainerDied","Data":"96eb8659686cbf15e6f797c4e028b01d9b92164064448490a7f5267cbcd6f55a"} Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.157746 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.158014 4703 scope.go:117] "RemoveContainer" containerID="f990b2036d7a9ba04c5e37ed8108dde74561e08c81afcce07ab34e32c03eec13" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.159224 4703 scope.go:117] "RemoveContainer" containerID="e79d487b705f01d50335082c477a60d434f7bbfe2992cd7404cc8c776028602e" Jan 30 12:24:12 crc kubenswrapper[4703]: E0130 12:24:12.159689 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 20s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(04da2786-8d07-4399-9200-95c1f596a97b)\"" pod="openstack/nova-scheduler-0" podUID="04da2786-8d07-4399-9200-95c1f596a97b" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.185726 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.222087 4703 scope.go:117] "RemoveContainer" containerID="c24e990f2786d3d6118707c4be0d49a7868269a4ffc8055b6a64223b7b144656" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.316413 4703 scope.go:117] "RemoveContainer" containerID="fdb261a08857adc57aecf865bcf3d50fc7a90a85bd4d8c1e9fc3530a43554664" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.336570 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="2dc8f562-ab16-4028-8142-2fffd6f42a44" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.218:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.344884 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="2dc8f562-ab16-4028-8142-2fffd6f42a44" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.218:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.350034 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:24:12 crc 
kubenswrapper[4703]: I0130 12:24:12.377228 4703 scope.go:117] "RemoveContainer" containerID="69db2c1b807f8c4f8d8d79d91c8b6b8f098aaf0f8977af277cbbdb3f203623b1" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.397970 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.454879 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.481957 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.482134 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.500963 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.501422 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.502149 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.506481 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.539318 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-gxvg2"] Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.541904 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gxvg2" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.557652 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.567587 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.596219 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-gxvg2"] Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.599009 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-gxvg2\" (UID: \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\") " pod="openstack/nova-cell1-cell-mapping-gxvg2" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.599078 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3327faec-a7ba-4cd1-987e-b9642a6d6eed-run-httpd\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.599132 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3327faec-a7ba-4cd1-987e-b9642a6d6eed-config-data\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.599164 4703 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3327faec-a7ba-4cd1-987e-b9642a6d6eed-log-httpd\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.599184 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pnz9\" (UniqueName: \"kubernetes.io/projected/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-kube-api-access-8pnz9\") pod \"nova-cell1-cell-mapping-gxvg2\" (UID: \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\") " pod="openstack/nova-cell1-cell-mapping-gxvg2" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.599214 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3327faec-a7ba-4cd1-987e-b9642a6d6eed-scripts\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.599233 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3327faec-a7ba-4cd1-987e-b9642a6d6eed-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.599273 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3327faec-a7ba-4cd1-987e-b9642a6d6eed-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.599308 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-scripts\") pod \"nova-cell1-cell-mapping-gxvg2\" (UID: \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\") " pod="openstack/nova-cell1-cell-mapping-gxvg2" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.599341 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3327faec-a7ba-4cd1-987e-b9642a6d6eed-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.599409 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-config-data\") pod \"nova-cell1-cell-mapping-gxvg2\" (UID: \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\") " pod="openstack/nova-cell1-cell-mapping-gxvg2" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.599458 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjh4r\" (UniqueName: \"kubernetes.io/projected/3327faec-a7ba-4cd1-987e-b9642a6d6eed-kube-api-access-cjh4r\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.701915 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" 
(UniqueName: \"kubernetes.io/secret/3327faec-a7ba-4cd1-987e-b9642a6d6eed-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.702014 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-scripts\") pod \"nova-cell1-cell-mapping-gxvg2\" (UID: \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\") " pod="openstack/nova-cell1-cell-mapping-gxvg2" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.702069 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3327faec-a7ba-4cd1-987e-b9642a6d6eed-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.702185 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-config-data\") pod \"nova-cell1-cell-mapping-gxvg2\" (UID: \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\") " pod="openstack/nova-cell1-cell-mapping-gxvg2" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.702252 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjh4r\" (UniqueName: \"kubernetes.io/projected/3327faec-a7ba-4cd1-987e-b9642a6d6eed-kube-api-access-cjh4r\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.702325 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-gxvg2\" (UID: \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\") " pod="openstack/nova-cell1-cell-mapping-gxvg2" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.702356 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3327faec-a7ba-4cd1-987e-b9642a6d6eed-run-httpd\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.702382 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3327faec-a7ba-4cd1-987e-b9642a6d6eed-config-data\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.702425 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pnz9\" (UniqueName: \"kubernetes.io/projected/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-kube-api-access-8pnz9\") pod \"nova-cell1-cell-mapping-gxvg2\" (UID: \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\") " pod="openstack/nova-cell1-cell-mapping-gxvg2" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.702450 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3327faec-a7ba-4cd1-987e-b9642a6d6eed-log-httpd\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc 
kubenswrapper[4703]: I0130 12:24:12.702483 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3327faec-a7ba-4cd1-987e-b9642a6d6eed-scripts\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.702507 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3327faec-a7ba-4cd1-987e-b9642a6d6eed-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.705143 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3327faec-a7ba-4cd1-987e-b9642a6d6eed-run-httpd\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.705596 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3327faec-a7ba-4cd1-987e-b9642a6d6eed-log-httpd\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.712606 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-scripts\") pod \"nova-cell1-cell-mapping-gxvg2\" (UID: \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\") " pod="openstack/nova-cell1-cell-mapping-gxvg2" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.728021 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/3327faec-a7ba-4cd1-987e-b9642a6d6eed-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.735101 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3327faec-a7ba-4cd1-987e-b9642a6d6eed-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.739754 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pnz9\" (UniqueName: \"kubernetes.io/projected/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-kube-api-access-8pnz9\") pod \"nova-cell1-cell-mapping-gxvg2\" (UID: \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\") " pod="openstack/nova-cell1-cell-mapping-gxvg2" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.741096 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-gxvg2\" (UID: \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\") " pod="openstack/nova-cell1-cell-mapping-gxvg2" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.741389 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3327faec-a7ba-4cd1-987e-b9642a6d6eed-config-data\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " 
pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.748869 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-config-data\") pod \"nova-cell1-cell-mapping-gxvg2\" (UID: \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\") " pod="openstack/nova-cell1-cell-mapping-gxvg2" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.750154 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjh4r\" (UniqueName: \"kubernetes.io/projected/3327faec-a7ba-4cd1-987e-b9642a6d6eed-kube-api-access-cjh4r\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.761459 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/3327faec-a7ba-4cd1-987e-b9642a6d6eed-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.767018 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3327faec-a7ba-4cd1-987e-b9642a6d6eed-scripts\") pod \"ceilometer-0\" (UID: \"3327faec-a7ba-4cd1-987e-b9642a6d6eed\") " pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.826358 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.826441 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.865902 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 30 12:24:12 crc kubenswrapper[4703]: I0130 12:24:12.876020 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gxvg2" Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.067064 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rcq2z"] Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.074605 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rcq2z" Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.120388 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46k89\" (UniqueName: \"kubernetes.io/projected/620650ab-34d8-4489-b809-a799a1e64c5c-kube-api-access-46k89\") pod \"redhat-marketplace-rcq2z\" (UID: \"620650ab-34d8-4489-b809-a799a1e64c5c\") " pod="openshift-marketplace/redhat-marketplace-rcq2z" Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.120504 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/620650ab-34d8-4489-b809-a799a1e64c5c-utilities\") pod \"redhat-marketplace-rcq2z\" (UID: \"620650ab-34d8-4489-b809-a799a1e64c5c\") " pod="openshift-marketplace/redhat-marketplace-rcq2z" Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.120656 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/620650ab-34d8-4489-b809-a799a1e64c5c-catalog-content\") pod \"redhat-marketplace-rcq2z\" (UID: \"620650ab-34d8-4489-b809-a799a1e64c5c\") " pod="openshift-marketplace/redhat-marketplace-rcq2z" Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.235189 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0763380e-e32e-44b0-af83-61e43f0b5eeb" path="/var/lib/kubelet/pods/0763380e-e32e-44b0-af83-61e43f0b5eeb/volumes" Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.235926 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5334320c-225b-4cbe-b488-c0a9d27af74a" path="/var/lib/kubelet/pods/5334320c-225b-4cbe-b488-c0a9d27af74a/volumes" Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.236738 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46k89\" (UniqueName: \"kubernetes.io/projected/620650ab-34d8-4489-b809-a799a1e64c5c-kube-api-access-46k89\") pod \"redhat-marketplace-rcq2z\" (UID: \"620650ab-34d8-4489-b809-a799a1e64c5c\") " pod="openshift-marketplace/redhat-marketplace-rcq2z" Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.236869 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rcq2z"] Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.236874 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/620650ab-34d8-4489-b809-a799a1e64c5c-utilities\") pod \"redhat-marketplace-rcq2z\" (UID: \"620650ab-34d8-4489-b809-a799a1e64c5c\") " pod="openshift-marketplace/redhat-marketplace-rcq2z" Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.237934 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"42b1e803-4494-486f-960f-df4f64759fbf","Type":"ContainerStarted","Data":"08c55054ffe029f8a1bc7051b49066107eb51efa3418370d09cd88cb4a1e533d"} Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.237962 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"42b1e803-4494-486f-960f-df4f64759fbf","Type":"ContainerStarted","Data":"fed30f35551a4abb9d7a5ce4db9aa50094644ffd6c751cd1c81ef219b979953f"} Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.240976 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/620650ab-34d8-4489-b809-a799a1e64c5c-utilities\") pod \"redhat-marketplace-rcq2z\" (UID: \"620650ab-34d8-4489-b809-a799a1e64c5c\") " pod="openshift-marketplace/redhat-marketplace-rcq2z" Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.248834 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/620650ab-34d8-4489-b809-a799a1e64c5c-catalog-content\") pod \"redhat-marketplace-rcq2z\" (UID: \"620650ab-34d8-4489-b809-a799a1e64c5c\") " pod="openshift-marketplace/redhat-marketplace-rcq2z" Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.249631 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/620650ab-34d8-4489-b809-a799a1e64c5c-catalog-content\") pod \"redhat-marketplace-rcq2z\" (UID: \"620650ab-34d8-4489-b809-a799a1e64c5c\") " pod="openshift-marketplace/redhat-marketplace-rcq2z" Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.284661 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46k89\" (UniqueName: \"kubernetes.io/projected/620650ab-34d8-4489-b809-a799a1e64c5c-kube-api-access-46k89\") pod \"redhat-marketplace-rcq2z\" (UID: \"620650ab-34d8-4489-b809-a799a1e64c5c\") " pod="openshift-marketplace/redhat-marketplace-rcq2z" Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.292710 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rcq2z" Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.550604 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-gxvg2"] Jan 30 12:24:13 crc kubenswrapper[4703]: I0130 12:24:13.893714 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 30 12:24:14 crc kubenswrapper[4703]: I0130 12:24:14.080485 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rcq2z"] Jan 30 12:24:14 crc kubenswrapper[4703]: I0130 12:24:14.256698 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3327faec-a7ba-4cd1-987e-b9642a6d6eed","Type":"ContainerStarted","Data":"48f75bfb7e5ffb25010f3b67e729ccfd798f060a1135115b56db01b78f0ba724"} Jan 30 12:24:14 crc kubenswrapper[4703]: I0130 12:24:14.263707 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gxvg2" event={"ID":"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51","Type":"ContainerStarted","Data":"253a146d77ad5c7781999b3fb1c220eefbd487e84ece1f3a1c3a39c289c862f8"} Jan 30 12:24:14 crc kubenswrapper[4703]: I0130 12:24:14.263774 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gxvg2" event={"ID":"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51","Type":"ContainerStarted","Data":"f7d3ac8731375dce5550d61ed50a564c8734a4b99e09b59c58417badc780a114"} Jan 30 12:24:14 crc kubenswrapper[4703]: I0130 12:24:14.265918 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rcq2z" event={"ID":"620650ab-34d8-4489-b809-a799a1e64c5c","Type":"ContainerStarted","Data":"11d518aef3eeea65a15ed0b5600ea9949c75a2ffb2f44a9c9e28b5eade8ce50b"} Jan 30 12:24:14 crc kubenswrapper[4703]: I0130 12:24:14.270045 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"42b1e803-4494-486f-960f-df4f64759fbf","Type":"ContainerStarted","Data":"a4f0fb423de093ae5f13852bb464e82abcb1d4fdc7972849b71a1e3a03cca89f"} Jan 30 12:24:14 crc kubenswrapper[4703]: I0130 12:24:14.289554 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-gxvg2" podStartSLOduration=2.289525927 podStartE2EDuration="2.289525927s" podCreationTimestamp="2026-01-30 12:24:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:24:14.284178301 +0000 UTC m=+1690.061999965" watchObservedRunningTime="2026-01-30 12:24:14.289525927 +0000 UTC m=+1690.067347581" Jan 30 12:24:14 crc kubenswrapper[4703]: I0130 12:24:14.350397 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.350368171 podStartE2EDuration="3.350368171s" podCreationTimestamp="2026-01-30 12:24:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:24:14.345455635 +0000 UTC m=+1690.123277279" watchObservedRunningTime="2026-01-30 12:24:14.350368171 +0000 UTC m=+1690.128189825" Jan 30 12:24:14 crc kubenswrapper[4703]: I0130 12:24:14.559278 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq" Jan 30 12:24:14 crc kubenswrapper[4703]: I0130 12:24:14.725193 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-cccmv"] Jan 30 12:24:14 crc kubenswrapper[4703]: I0130 12:24:14.726364 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-757b4f8459-cccmv" podUID="c8357d8b-c1e1-4411-92c9-4de33313aaeb" containerName="dnsmasq-dns" containerID="cri-o://f96b4388e40767582588b210e283c06412984bc5aaac06d7107321a9a84ef6d7" gracePeriod=10 Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.309238 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3327faec-a7ba-4cd1-987e-b9642a6d6eed","Type":"ContainerStarted","Data":"5f3dc07d5196cad88c67aae3ee41ea3008d4db9a497b8a4d75a3235c41f7eaa2"} Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.324867 4703 generic.go:334] "Generic (PLEG): container finished" podID="620650ab-34d8-4489-b809-a799a1e64c5c" containerID="da12449b1242f14725d6a450a26b627760e43dd214daf07af63344b2eac1a5b8" exitCode=0 Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.325320 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rcq2z" event={"ID":"620650ab-34d8-4489-b809-a799a1e64c5c","Type":"ContainerDied","Data":"da12449b1242f14725d6a450a26b627760e43dd214daf07af63344b2eac1a5b8"} Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.342360 4703 generic.go:334] "Generic (PLEG): container finished" podID="c8357d8b-c1e1-4411-92c9-4de33313aaeb" containerID="f96b4388e40767582588b210e283c06412984bc5aaac06d7107321a9a84ef6d7" exitCode=0 Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.344016 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-757b4f8459-cccmv" event={"ID":"c8357d8b-c1e1-4411-92c9-4de33313aaeb","Type":"ContainerDied","Data":"f96b4388e40767582588b210e283c06412984bc5aaac06d7107321a9a84ef6d7"} Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.344171 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-757b4f8459-cccmv" event={"ID":"c8357d8b-c1e1-4411-92c9-4de33313aaeb","Type":"ContainerDied","Data":"cf4350ffd91ce97eebc3bc0e19eb6d981e5793f46110d009ffc7048ff6d4ae7b"} Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.344207 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cf4350ffd91ce97eebc3bc0e19eb6d981e5793f46110d009ffc7048ff6d4ae7b" Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.488634 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-cccmv" Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.595429 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-dns-svc\") pod \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.596318 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-ovsdbserver-nb\") pod \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.596423 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-dns-swift-storage-0\") pod \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.598857 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5fkbn\" (UniqueName: \"kubernetes.io/projected/c8357d8b-c1e1-4411-92c9-4de33313aaeb-kube-api-access-5fkbn\") pod \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.599032 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-ovsdbserver-sb\") pod \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.599215 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-config\") pod \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") " Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.608863 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8357d8b-c1e1-4411-92c9-4de33313aaeb-kube-api-access-5fkbn" (OuterVolumeSpecName: "kube-api-access-5fkbn") pod "c8357d8b-c1e1-4411-92c9-4de33313aaeb" (UID: "c8357d8b-c1e1-4411-92c9-4de33313aaeb"). InnerVolumeSpecName "kube-api-access-5fkbn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.690245 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c8357d8b-c1e1-4411-92c9-4de33313aaeb" (UID: "c8357d8b-c1e1-4411-92c9-4de33313aaeb"). 
Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.702280 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c8357d8b-c1e1-4411-92c9-4de33313aaeb" (UID: "c8357d8b-c1e1-4411-92c9-4de33313aaeb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.702985 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-dns-svc\") pod \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\" (UID: \"c8357d8b-c1e1-4411-92c9-4de33313aaeb\") "
Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.703655 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.704965 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5fkbn\" (UniqueName: \"kubernetes.io/projected/c8357d8b-c1e1-4411-92c9-4de33313aaeb-kube-api-access-5fkbn\") on node \"crc\" DevicePath \"\""
Jan 30 12:24:15 crc kubenswrapper[4703]: W0130 12:24:15.705092 4703 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/c8357d8b-c1e1-4411-92c9-4de33313aaeb/volumes/kubernetes.io~configmap/dns-svc
Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.705107 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c8357d8b-c1e1-4411-92c9-4de33313aaeb" (UID: "c8357d8b-c1e1-4411-92c9-4de33313aaeb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.712503 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c8357d8b-c1e1-4411-92c9-4de33313aaeb" (UID: "c8357d8b-c1e1-4411-92c9-4de33313aaeb"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.716094 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c8357d8b-c1e1-4411-92c9-4de33313aaeb" (UID: "c8357d8b-c1e1-4411-92c9-4de33313aaeb"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.736445 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-config" (OuterVolumeSpecName: "config") pod "c8357d8b-c1e1-4411-92c9-4de33313aaeb" (UID: "c8357d8b-c1e1-4411-92c9-4de33313aaeb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.807399 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.808203 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-config\") on node \"crc\" DevicePath \"\""
Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.808221 4703 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 30 12:24:15 crc kubenswrapper[4703]: I0130 12:24:15.808233 4703 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c8357d8b-c1e1-4411-92c9-4de33313aaeb-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 30 12:24:16 crc kubenswrapper[4703]: I0130 12:24:16.382340 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-757b4f8459-cccmv"
Jan 30 12:24:16 crc kubenswrapper[4703]: I0130 12:24:16.382307 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3327faec-a7ba-4cd1-987e-b9642a6d6eed","Type":"ContainerStarted","Data":"acf84e7a06223786b932dd3287a621c3a1d27b194f8a14e23b6e6468e558c2e1"}
Jan 30 12:24:16 crc kubenswrapper[4703]: I0130 12:24:16.455433 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-cccmv"]
Jan 30 12:24:16 crc kubenswrapper[4703]: I0130 12:24:16.479226 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-757b4f8459-cccmv"]
Jan 30 12:24:17 crc kubenswrapper[4703]: I0130 12:24:17.104846 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8357d8b-c1e1-4411-92c9-4de33313aaeb" path="/var/lib/kubelet/pods/c8357d8b-c1e1-4411-92c9-4de33313aaeb/volumes"
Jan 30 12:24:17 crc kubenswrapper[4703]: I0130 12:24:17.397186 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3327faec-a7ba-4cd1-987e-b9642a6d6eed","Type":"ContainerStarted","Data":"a3f002dc5558d5ccb7780fe4df5a665a71ab2b6c3f98c4a04f93e46c750abd83"}
Jan 30 12:24:17 crc kubenswrapper[4703]: I0130 12:24:17.400209 4703 generic.go:334] "Generic (PLEG): container finished" podID="620650ab-34d8-4489-b809-a799a1e64c5c" containerID="101da32dd892123fc0217f2885e05fe9abcff87314ced2f26fbedbdc1fbbbbed" exitCode=0
Jan 30 12:24:17 crc kubenswrapper[4703]: I0130 12:24:17.400270 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rcq2z" event={"ID":"620650ab-34d8-4489-b809-a799a1e64c5c","Type":"ContainerDied","Data":"101da32dd892123fc0217f2885e05fe9abcff87314ced2f26fbedbdc1fbbbbed"}
Jan 30 12:24:18 crc kubenswrapper[4703]: I0130 12:24:18.418200 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rcq2z" event={"ID":"620650ab-34d8-4489-b809-a799a1e64c5c","Type":"ContainerStarted","Data":"2f320830df6e5a3a6c5d69927ea38bc1bf6818f1a7bf44ffa993cc1a3bbc475a"}
Jan 30 12:24:18 crc kubenswrapper[4703]: I0130 12:24:18.450249 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rcq2z" podStartSLOduration=2.847083027 podStartE2EDuration="5.450213286s" podCreationTimestamp="2026-01-30 12:24:13 +0000 UTC" firstStartedPulling="2026-01-30 12:24:15.353274462 +0000 UTC m=+1691.131096126" lastFinishedPulling="2026-01-30 12:24:17.956404731 +0000 UTC m=+1693.734226385" observedRunningTime="2026-01-30 12:24:18.438196389 +0000 UTC m=+1694.216018053" watchObservedRunningTime="2026-01-30 12:24:18.450213286 +0000 UTC m=+1694.228034940"
Jan 30 12:24:20 crc kubenswrapper[4703]: I0130 12:24:20.454603 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"3327faec-a7ba-4cd1-987e-b9642a6d6eed","Type":"ContainerStarted","Data":"690f7d8b8ee0993974927eecfea7ca95986fdebb4ded31de4c94f750e4c9241d"}
Jan 30 12:24:20 crc kubenswrapper[4703]: I0130 12:24:20.455587 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 30 12:24:20 crc kubenswrapper[4703]: I0130 12:24:20.502135 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.163329193 podStartE2EDuration="8.502092243s" podCreationTimestamp="2026-01-30 12:24:12 +0000 UTC" firstStartedPulling="2026-01-30 12:24:13.907380483 +0000 UTC m=+1689.685202137" lastFinishedPulling="2026-01-30 12:24:19.246143533 +0000 UTC m=+1695.023965187" observedRunningTime="2026-01-30 12:24:20.499006815 +0000 UTC m=+1696.276828469" watchObservedRunningTime="2026-01-30 12:24:20.502092243 +0000 UTC m=+1696.279913897"
Jan 30 12:24:21 crc kubenswrapper[4703]: I0130 12:24:21.329893 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 30 12:24:21 crc kubenswrapper[4703]: I0130 12:24:21.351986 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 30 12:24:21 crc kubenswrapper[4703]: I0130 12:24:21.358830 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 30 12:24:21 crc kubenswrapper[4703]: I0130 12:24:21.472632 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 30 12:24:21 crc kubenswrapper[4703]: I0130 12:24:21.792853 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 30 12:24:21 crc kubenswrapper[4703]: I0130 12:24:21.792925 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 30 12:24:22 crc kubenswrapper[4703]: I0130 12:24:22.479059 4703 generic.go:334] "Generic (PLEG): container finished" podID="38c055a8-b3f5-4648-acf9-8e0bdc3b4c51" containerID="253a146d77ad5c7781999b3fb1c220eefbd487e84ece1f3a1c3a39c289c862f8" exitCode=0
Jan 30 12:24:22 crc kubenswrapper[4703]: I0130 12:24:22.479206 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gxvg2" event={"ID":"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51","Type":"ContainerDied","Data":"253a146d77ad5c7781999b3fb1c220eefbd487e84ece1f3a1c3a39c289c862f8"}
Jan 30 12:24:22 crc kubenswrapper[4703]: I0130 12:24:22.810423 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="42b1e803-4494-486f-960f-df4f64759fbf" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.221:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 30 12:24:22 crc kubenswrapper[4703]: I0130 12:24:22.811187 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="42b1e803-4494-486f-960f-df4f64759fbf" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.221:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 30 12:24:23 crc kubenswrapper[4703]: I0130 12:24:23.293665 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rcq2z"
Jan 30 12:24:23 crc kubenswrapper[4703]: I0130 12:24:23.293807 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rcq2z"
Jan 30 12:24:23 crc kubenswrapper[4703]: I0130 12:24:23.356231 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rcq2z"
Jan 30 12:24:23 crc kubenswrapper[4703]: I0130 12:24:23.571214 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rcq2z"
Jan 30 12:24:23 crc kubenswrapper[4703]: I0130 12:24:23.654283 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rcq2z"]
Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.006259 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gxvg2"
Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.141995 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-config-data\") pod \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\" (UID: \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\") "
Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.142403 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-scripts\") pod \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\" (UID: \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\") "
Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.142484 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8pnz9\" (UniqueName: \"kubernetes.io/projected/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-kube-api-access-8pnz9\") pod \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\" (UID: \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\") "
Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.142558 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-combined-ca-bundle\") pod \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\" (UID: \"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51\") "
Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.150459 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-kube-api-access-8pnz9" (OuterVolumeSpecName: "kube-api-access-8pnz9") pod "38c055a8-b3f5-4648-acf9-8e0bdc3b4c51" (UID: "38c055a8-b3f5-4648-acf9-8e0bdc3b4c51"). InnerVolumeSpecName "kube-api-access-8pnz9". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.180465 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-scripts" (OuterVolumeSpecName: "scripts") pod "38c055a8-b3f5-4648-acf9-8e0bdc3b4c51" (UID: "38c055a8-b3f5-4648-acf9-8e0bdc3b4c51"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.231858 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-config-data" (OuterVolumeSpecName: "config-data") pod "38c055a8-b3f5-4648-acf9-8e0bdc3b4c51" (UID: "38c055a8-b3f5-4648-acf9-8e0bdc3b4c51"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.244396 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "38c055a8-b3f5-4648-acf9-8e0bdc3b4c51" (UID: "38c055a8-b3f5-4648-acf9-8e0bdc3b4c51"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.249606 4703 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-scripts\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.249671 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8pnz9\" (UniqueName: \"kubernetes.io/projected/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-kube-api-access-8pnz9\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.249694 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.249707 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.523336 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gxvg2" Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.525102 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gxvg2" event={"ID":"38c055a8-b3f5-4648-acf9-8e0bdc3b4c51","Type":"ContainerDied","Data":"f7d3ac8731375dce5550d61ed50a564c8734a4b99e09b59c58417badc780a114"} Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.525166 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7d3ac8731375dce5550d61ed50a564c8734a4b99e09b59c58417badc780a114" Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.722704 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.723096 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="42b1e803-4494-486f-960f-df4f64759fbf" containerName="nova-api-log" containerID="cri-o://08c55054ffe029f8a1bc7051b49066107eb51efa3418370d09cd88cb4a1e533d" gracePeriod=30 Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.723233 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="42b1e803-4494-486f-960f-df4f64759fbf" containerName="nova-api-api" containerID="cri-o://a4f0fb423de093ae5f13852bb464e82abcb1d4fdc7972849b71a1e3a03cca89f" gracePeriod=30 Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.740875 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.764989 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.765369 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2dc8f562-ab16-4028-8142-2fffd6f42a44" containerName="nova-metadata-log" containerID="cri-o://cd80b84024cdb7a627f658bae556313655a2cbc87f9966eed164ea09bb45da19" gracePeriod=30 Jan 30 12:24:24 crc kubenswrapper[4703]: I0130 12:24:24.765637 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2dc8f562-ab16-4028-8142-2fffd6f42a44" containerName="nova-metadata-metadata" containerID="cri-o://faf8f61aa0c3ef41ad8c69cdb0bafbf5c6b050feb8ead1b4586859fa23ba06f0" gracePeriod=30 Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.326796 4703 util.go:48] "No ready sandbox for pod can be found. 
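"Killing container with a grace period" above is an API deletion reaching the runtime: the container receives SIGTERM, and only after the grace period expires (gracePeriod=30 here, 10 for the dnsmasq pod earlier) does SIGKILL follow. A process that exits cleanly on SIGTERM reports its own exit code; one terminated by the default signal disposition shows exitCode=143 (128+15, i.e. SIGTERM), which is what the nova-api-log and nova-metadata-log ContainerDied events below record. A generic Go shutdown handler of the kind that beats the deadline (a sketch, unrelated to the actual nova images):

    package main

    import (
        "fmt"
        "os"
        "os/signal"
        "syscall"
    )

    // Waits for the SIGTERM the runtime sends at the start of the grace
    // period, runs cleanup, and exits before the SIGKILL deadline.
    func main() {
        term := make(chan os.Signal, 1)
        signal.Notify(term, syscall.SIGTERM)

        <-term // delivered when the kubelet asks the runtime to stop us
        fmt.Println("SIGTERM received, shutting down")
        // ... flush logs, close connections, etc. ...
        os.Exit(0) // without a handler, the default disposition yields exit code 143
    }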
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.479644 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5w55\" (UniqueName: \"kubernetes.io/projected/04da2786-8d07-4399-9200-95c1f596a97b-kube-api-access-h5w55\") pod \"04da2786-8d07-4399-9200-95c1f596a97b\" (UID: \"04da2786-8d07-4399-9200-95c1f596a97b\") "
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.479802 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04da2786-8d07-4399-9200-95c1f596a97b-config-data\") pod \"04da2786-8d07-4399-9200-95c1f596a97b\" (UID: \"04da2786-8d07-4399-9200-95c1f596a97b\") "
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.479823 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04da2786-8d07-4399-9200-95c1f596a97b-combined-ca-bundle\") pod \"04da2786-8d07-4399-9200-95c1f596a97b\" (UID: \"04da2786-8d07-4399-9200-95c1f596a97b\") "
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.487802 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04da2786-8d07-4399-9200-95c1f596a97b-kube-api-access-h5w55" (OuterVolumeSpecName: "kube-api-access-h5w55") pod "04da2786-8d07-4399-9200-95c1f596a97b" (UID: "04da2786-8d07-4399-9200-95c1f596a97b"). InnerVolumeSpecName "kube-api-access-h5w55". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.523723 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04da2786-8d07-4399-9200-95c1f596a97b-config-data" (OuterVolumeSpecName: "config-data") pod "04da2786-8d07-4399-9200-95c1f596a97b" (UID: "04da2786-8d07-4399-9200-95c1f596a97b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.526936 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04da2786-8d07-4399-9200-95c1f596a97b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "04da2786-8d07-4399-9200-95c1f596a97b" (UID: "04da2786-8d07-4399-9200-95c1f596a97b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.553748 4703 generic.go:334] "Generic (PLEG): container finished" podID="2dc8f562-ab16-4028-8142-2fffd6f42a44" containerID="cd80b84024cdb7a627f658bae556313655a2cbc87f9966eed164ea09bb45da19" exitCode=143
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.553861 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2dc8f562-ab16-4028-8142-2fffd6f42a44","Type":"ContainerDied","Data":"cd80b84024cdb7a627f658bae556313655a2cbc87f9966eed164ea09bb45da19"}
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.558982 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"04da2786-8d07-4399-9200-95c1f596a97b","Type":"ContainerDied","Data":"aeb85865225d4f900bcc240fa194117ccdc51f4bc0f9a2a2a59d8c6c225c10ea"}
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.559025 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.559458 4703 scope.go:117] "RemoveContainer" containerID="e79d487b705f01d50335082c477a60d434f7bbfe2992cd7404cc8c776028602e"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.564551 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"42b1e803-4494-486f-960f-df4f64759fbf","Type":"ContainerDied","Data":"08c55054ffe029f8a1bc7051b49066107eb51efa3418370d09cd88cb4a1e533d"}
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.564609 4703 generic.go:334] "Generic (PLEG): container finished" podID="42b1e803-4494-486f-960f-df4f64759fbf" containerID="08c55054ffe029f8a1bc7051b49066107eb51efa3418370d09cd88cb4a1e533d" exitCode=143
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.565035 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rcq2z" podUID="620650ab-34d8-4489-b809-a799a1e64c5c" containerName="registry-server" containerID="cri-o://2f320830df6e5a3a6c5d69927ea38bc1bf6818f1a7bf44ffa993cc1a3bbc475a" gracePeriod=2
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.583250 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5w55\" (UniqueName: \"kubernetes.io/projected/04da2786-8d07-4399-9200-95c1f596a97b-kube-api-access-h5w55\") on node \"crc\" DevicePath \"\""
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.583488 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04da2786-8d07-4399-9200-95c1f596a97b-config-data\") on node \"crc\" DevicePath \"\""
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.583617 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/04da2786-8d07-4399-9200-95c1f596a97b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.615276 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.639658 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.701728 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Jan 30 12:24:25 crc kubenswrapper[4703]: E0130 12:24:25.702381 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38c055a8-b3f5-4648-acf9-8e0bdc3b4c51" containerName="nova-manage"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.702408 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="38c055a8-b3f5-4648-acf9-8e0bdc3b4c51" containerName="nova-manage"
Jan 30 12:24:25 crc kubenswrapper[4703]: E0130 12:24:25.702432 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04da2786-8d07-4399-9200-95c1f596a97b" containerName="nova-scheduler-scheduler"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.702441 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="04da2786-8d07-4399-9200-95c1f596a97b" containerName="nova-scheduler-scheduler"
Jan 30 12:24:25 crc kubenswrapper[4703]: E0130 12:24:25.702454 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04da2786-8d07-4399-9200-95c1f596a97b" containerName="nova-scheduler-scheduler"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.702464 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="04da2786-8d07-4399-9200-95c1f596a97b" containerName="nova-scheduler-scheduler"
Jan 30 12:24:25 crc kubenswrapper[4703]: E0130 12:24:25.702493 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8357d8b-c1e1-4411-92c9-4de33313aaeb" containerName="init"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.702502 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8357d8b-c1e1-4411-92c9-4de33313aaeb" containerName="init"
Jan 30 12:24:25 crc kubenswrapper[4703]: E0130 12:24:25.702525 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8357d8b-c1e1-4411-92c9-4de33313aaeb" containerName="dnsmasq-dns"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.702535 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8357d8b-c1e1-4411-92c9-4de33313aaeb" containerName="dnsmasq-dns"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.702816 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8357d8b-c1e1-4411-92c9-4de33313aaeb" containerName="dnsmasq-dns"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.702844 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="04da2786-8d07-4399-9200-95c1f596a97b" containerName="nova-scheduler-scheduler"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.702854 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="38c055a8-b3f5-4648-acf9-8e0bdc3b4c51" containerName="nova-manage"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.702873 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="04da2786-8d07-4399-9200-95c1f596a97b" containerName="nova-scheduler-scheduler"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.704046 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.708744 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.735954 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.790156 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2rct\" (UniqueName: \"kubernetes.io/projected/2fc19a6b-3cde-4bb5-9499-f5be846289da-kube-api-access-d2rct\") pod \"nova-scheduler-0\" (UID: \"2fc19a6b-3cde-4bb5-9499-f5be846289da\") " pod="openstack/nova-scheduler-0"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.790507 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fc19a6b-3cde-4bb5-9499-f5be846289da-config-data\") pod \"nova-scheduler-0\" (UID: \"2fc19a6b-3cde-4bb5-9499-f5be846289da\") " pod="openstack/nova-scheduler-0"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.790687 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fc19a6b-3cde-4bb5-9499-f5be846289da-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"2fc19a6b-3cde-4bb5-9499-f5be846289da\") " pod="openstack/nova-scheduler-0"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.894251 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fc19a6b-3cde-4bb5-9499-f5be846289da-config-data\") pod \"nova-scheduler-0\" (UID: \"2fc19a6b-3cde-4bb5-9499-f5be846289da\") " pod="openstack/nova-scheduler-0"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.894351 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fc19a6b-3cde-4bb5-9499-f5be846289da-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"2fc19a6b-3cde-4bb5-9499-f5be846289da\") " pod="openstack/nova-scheduler-0"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.894607 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2rct\" (UniqueName: \"kubernetes.io/projected/2fc19a6b-3cde-4bb5-9499-f5be846289da-kube-api-access-d2rct\") pod \"nova-scheduler-0\" (UID: \"2fc19a6b-3cde-4bb5-9499-f5be846289da\") " pod="openstack/nova-scheduler-0"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.904565 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2fc19a6b-3cde-4bb5-9499-f5be846289da-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"2fc19a6b-3cde-4bb5-9499-f5be846289da\") " pod="openstack/nova-scheduler-0"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.904724 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2fc19a6b-3cde-4bb5-9499-f5be846289da-config-data\") pod \"nova-scheduler-0\" (UID: \"2fc19a6b-3cde-4bb5-9499-f5be846289da\") " pod="openstack/nova-scheduler-0"
Jan 30 12:24:25 crc kubenswrapper[4703]: I0130 12:24:25.919509 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2rct\" (UniqueName: \"kubernetes.io/projected/2fc19a6b-3cde-4bb5-9499-f5be846289da-kube-api-access-d2rct\") pod \"nova-scheduler-0\" (UID: \"2fc19a6b-3cde-4bb5-9499-f5be846289da\") " pod="openstack/nova-scheduler-0"
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.033278 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.231796 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rcq2z"
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.327523 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/620650ab-34d8-4489-b809-a799a1e64c5c-utilities\") pod \"620650ab-34d8-4489-b809-a799a1e64c5c\" (UID: \"620650ab-34d8-4489-b809-a799a1e64c5c\") "
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.327715 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-46k89\" (UniqueName: \"kubernetes.io/projected/620650ab-34d8-4489-b809-a799a1e64c5c-kube-api-access-46k89\") pod \"620650ab-34d8-4489-b809-a799a1e64c5c\" (UID: \"620650ab-34d8-4489-b809-a799a1e64c5c\") "
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.327847 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/620650ab-34d8-4489-b809-a799a1e64c5c-catalog-content\") pod \"620650ab-34d8-4489-b809-a799a1e64c5c\" (UID: \"620650ab-34d8-4489-b809-a799a1e64c5c\") "
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.331555 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/620650ab-34d8-4489-b809-a799a1e64c5c-utilities" (OuterVolumeSpecName: "utilities") pod "620650ab-34d8-4489-b809-a799a1e64c5c" (UID: "620650ab-34d8-4489-b809-a799a1e64c5c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.340653 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/620650ab-34d8-4489-b809-a799a1e64c5c-kube-api-access-46k89" (OuterVolumeSpecName: "kube-api-access-46k89") pod "620650ab-34d8-4489-b809-a799a1e64c5c" (UID: "620650ab-34d8-4489-b809-a799a1e64c5c"). InnerVolumeSpecName "kube-api-access-46k89". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.360212 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/620650ab-34d8-4489-b809-a799a1e64c5c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "620650ab-34d8-4489-b809-a799a1e64c5c" (UID: "620650ab-34d8-4489-b809-a799a1e64c5c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.433814 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/620650ab-34d8-4489-b809-a799a1e64c5c-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.433870 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/620650ab-34d8-4489-b809-a799a1e64c5c-utilities\") on node \"crc\" DevicePath \"\""
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.433882 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-46k89\" (UniqueName: \"kubernetes.io/projected/620650ab-34d8-4489-b809-a799a1e64c5c-kube-api-access-46k89\") on node \"crc\" DevicePath \"\""
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.582517 4703 generic.go:334] "Generic (PLEG): container finished" podID="620650ab-34d8-4489-b809-a799a1e64c5c" containerID="2f320830df6e5a3a6c5d69927ea38bc1bf6818f1a7bf44ffa993cc1a3bbc475a" exitCode=0
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.582572 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rcq2z" event={"ID":"620650ab-34d8-4489-b809-a799a1e64c5c","Type":"ContainerDied","Data":"2f320830df6e5a3a6c5d69927ea38bc1bf6818f1a7bf44ffa993cc1a3bbc475a"}
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.582609 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rcq2z" event={"ID":"620650ab-34d8-4489-b809-a799a1e64c5c","Type":"ContainerDied","Data":"11d518aef3eeea65a15ed0b5600ea9949c75a2ffb2f44a9c9e28b5eade8ce50b"}
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.582610 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rcq2z"
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.582630 4703 scope.go:117] "RemoveContainer" containerID="2f320830df6e5a3a6c5d69927ea38bc1bf6818f1a7bf44ffa993cc1a3bbc475a"
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.622751 4703 scope.go:117] "RemoveContainer" containerID="101da32dd892123fc0217f2885e05fe9abcff87314ced2f26fbedbdc1fbbbbed"
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.631657 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rcq2z"]
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.641237 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 30 12:24:26 crc kubenswrapper[4703]: W0130 12:24:26.641327 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fc19a6b_3cde_4bb5_9499_f5be846289da.slice/crio-31da43a37a623e1cf97348215ee4caee4387e64ba3df4a5a97576dbc65e03c61 WatchSource:0}: Error finding container 31da43a37a623e1cf97348215ee4caee4387e64ba3df4a5a97576dbc65e03c61: Status 404 returned error can't find the container with id 31da43a37a623e1cf97348215ee4caee4387e64ba3df4a5a97576dbc65e03c61
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.653184 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rcq2z"]
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.653561 4703 scope.go:117] "RemoveContainer" containerID="da12449b1242f14725d6a450a26b627760e43dd214daf07af63344b2eac1a5b8"
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.678678 4703 scope.go:117] "RemoveContainer" containerID="2f320830df6e5a3a6c5d69927ea38bc1bf6818f1a7bf44ffa993cc1a3bbc475a"
Jan 30 12:24:26 crc kubenswrapper[4703]: E0130 12:24:26.679211 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f320830df6e5a3a6c5d69927ea38bc1bf6818f1a7bf44ffa993cc1a3bbc475a\": container with ID starting with 2f320830df6e5a3a6c5d69927ea38bc1bf6818f1a7bf44ffa993cc1a3bbc475a not found: ID does not exist" containerID="2f320830df6e5a3a6c5d69927ea38bc1bf6818f1a7bf44ffa993cc1a3bbc475a"
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.679259 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f320830df6e5a3a6c5d69927ea38bc1bf6818f1a7bf44ffa993cc1a3bbc475a"} err="failed to get container status \"2f320830df6e5a3a6c5d69927ea38bc1bf6818f1a7bf44ffa993cc1a3bbc475a\": rpc error: code = NotFound desc = could not find container \"2f320830df6e5a3a6c5d69927ea38bc1bf6818f1a7bf44ffa993cc1a3bbc475a\": container with ID starting with 2f320830df6e5a3a6c5d69927ea38bc1bf6818f1a7bf44ffa993cc1a3bbc475a not found: ID does not exist"
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.679298 4703 scope.go:117] "RemoveContainer" containerID="101da32dd892123fc0217f2885e05fe9abcff87314ced2f26fbedbdc1fbbbbed"
Jan 30 12:24:26 crc kubenswrapper[4703]: E0130 12:24:26.679983 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"101da32dd892123fc0217f2885e05fe9abcff87314ced2f26fbedbdc1fbbbbed\": container with ID starting with 101da32dd892123fc0217f2885e05fe9abcff87314ced2f26fbedbdc1fbbbbed not found: ID does not exist" containerID="101da32dd892123fc0217f2885e05fe9abcff87314ced2f26fbedbdc1fbbbbed"
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.680104 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"101da32dd892123fc0217f2885e05fe9abcff87314ced2f26fbedbdc1fbbbbed"} err="failed to get container status \"101da32dd892123fc0217f2885e05fe9abcff87314ced2f26fbedbdc1fbbbbed\": rpc error: code = NotFound desc = could not find container \"101da32dd892123fc0217f2885e05fe9abcff87314ced2f26fbedbdc1fbbbbed\": container with ID starting with 101da32dd892123fc0217f2885e05fe9abcff87314ced2f26fbedbdc1fbbbbed not found: ID does not exist"
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.680242 4703 scope.go:117] "RemoveContainer" containerID="da12449b1242f14725d6a450a26b627760e43dd214daf07af63344b2eac1a5b8"
Jan 30 12:24:26 crc kubenswrapper[4703]: E0130 12:24:26.686843 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da12449b1242f14725d6a450a26b627760e43dd214daf07af63344b2eac1a5b8\": container with ID starting with da12449b1242f14725d6a450a26b627760e43dd214daf07af63344b2eac1a5b8 not found: ID does not exist" containerID="da12449b1242f14725d6a450a26b627760e43dd214daf07af63344b2eac1a5b8"
Jan 30 12:24:26 crc kubenswrapper[4703]: I0130 12:24:26.686881 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da12449b1242f14725d6a450a26b627760e43dd214daf07af63344b2eac1a5b8"} err="failed to get container status \"da12449b1242f14725d6a450a26b627760e43dd214daf07af63344b2eac1a5b8\": rpc error: code = NotFound desc = could not find container \"da12449b1242f14725d6a450a26b627760e43dd214daf07af63344b2eac1a5b8\": container with ID starting with da12449b1242f14725d6a450a26b627760e43dd214daf07af63344b2eac1a5b8 not found: ID does not exist"
Jan 30 12:24:27 crc kubenswrapper[4703]: I0130 12:24:27.098275 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04da2786-8d07-4399-9200-95c1f596a97b" path="/var/lib/kubelet/pods/04da2786-8d07-4399-9200-95c1f596a97b/volumes"
Jan 30 12:24:27 crc kubenswrapper[4703]: I0130 12:24:27.099761 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="620650ab-34d8-4489-b809-a799a1e64c5c" path="/var/lib/kubelet/pods/620650ab-34d8-4489-b809-a799a1e64c5c/volumes"
Jan 30 12:24:27 crc kubenswrapper[4703]: I0130 12:24:27.598626 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerStarted","Data":"379e1db067847a9ebadf07796b4d79647d8e13d36181fe0778827bfae57d7bff"}
Jan 30 12:24:27 crc kubenswrapper[4703]: I0130 12:24:27.598702 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerStarted","Data":"31da43a37a623e1cf97348215ee4caee4387e64ba3df4a5a97576dbc65e03c61"}
Jan 30 12:24:27 crc kubenswrapper[4703]: I0130 12:24:27.619637 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.6196103710000003 podStartE2EDuration="2.619610371s" podCreationTimestamp="2026-01-30 12:24:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:24:27.615111335 +0000 UTC m=+1703.392932979" watchObservedRunningTime="2026-01-30 12:24:27.619610371 +0000 UTC m=+1703.397432025"
Jan 30 12:24:27 crc kubenswrapper[4703]: I0130 12:24:27.928480 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="2dc8f562-ab16-4028-8142-2fffd6f42a44" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.218:8775/\": read tcp 10.217.0.2:51708->10.217.0.218:8775: read: connection reset by peer"
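The "ContainerStatus from runtime service failed ... NotFound" errors in the stretch above are a benign race during cleanup: RemoveContainer has already deleted the container in CRI-O, so the follow-up status query has nothing to return, and the kubelet logs the error and moves on. With a gRPC CRI runtime, a caller separates that benign case from real failures by the status code; a sketch assuming the standard google.golang.org/grpc status and codes packages are available:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // Treats a CRI NotFound as "already gone" rather than a real failure,
    // which is how the DeleteContainer errors above should be read.
    func alreadyGone(err error) bool {
        return status.Code(err) == codes.NotFound
    }

    func main() {
        // Stand-in for the rpc error shown in the log; a real caller would
        // receive this from the runtime client's ContainerStatus call.
        err := status.Error(codes.NotFound, "could not find container")
        if alreadyGone(err) {
            fmt.Println("container already removed; nothing to do")
        }
    }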
Jan 30 12:24:27 crc kubenswrapper[4703]: I0130 12:24:27.928865 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="2dc8f562-ab16-4028-8142-2fffd6f42a44" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.218:8775/\": read tcp 10.217.0.2:51712->10.217.0.218:8775: read: connection reset by peer"
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.446786 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.497840 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2dc8f562-ab16-4028-8142-2fffd6f42a44-logs\") pod \"2dc8f562-ab16-4028-8142-2fffd6f42a44\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") "
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.497895 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bm549\" (UniqueName: \"kubernetes.io/projected/2dc8f562-ab16-4028-8142-2fffd6f42a44-kube-api-access-bm549\") pod \"2dc8f562-ab16-4028-8142-2fffd6f42a44\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") "
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.498233 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dc8f562-ab16-4028-8142-2fffd6f42a44-config-data\") pod \"2dc8f562-ab16-4028-8142-2fffd6f42a44\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") "
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.498329 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2dc8f562-ab16-4028-8142-2fffd6f42a44-nova-metadata-tls-certs\") pod \"2dc8f562-ab16-4028-8142-2fffd6f42a44\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") "
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.498354 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dc8f562-ab16-4028-8142-2fffd6f42a44-combined-ca-bundle\") pod \"2dc8f562-ab16-4028-8142-2fffd6f42a44\" (UID: \"2dc8f562-ab16-4028-8142-2fffd6f42a44\") "
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.509479 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2dc8f562-ab16-4028-8142-2fffd6f42a44-logs" (OuterVolumeSpecName: "logs") pod "2dc8f562-ab16-4028-8142-2fffd6f42a44" (UID: "2dc8f562-ab16-4028-8142-2fffd6f42a44"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.526528 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2dc8f562-ab16-4028-8142-2fffd6f42a44-kube-api-access-bm549" (OuterVolumeSpecName: "kube-api-access-bm549") pod "2dc8f562-ab16-4028-8142-2fffd6f42a44" (UID: "2dc8f562-ab16-4028-8142-2fffd6f42a44"). InnerVolumeSpecName "kube-api-access-bm549". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.580332 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dc8f562-ab16-4028-8142-2fffd6f42a44-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2dc8f562-ab16-4028-8142-2fffd6f42a44" (UID: "2dc8f562-ab16-4028-8142-2fffd6f42a44"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.593269 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dc8f562-ab16-4028-8142-2fffd6f42a44-config-data" (OuterVolumeSpecName: "config-data") pod "2dc8f562-ab16-4028-8142-2fffd6f42a44" (UID: "2dc8f562-ab16-4028-8142-2fffd6f42a44"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.601054 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2dc8f562-ab16-4028-8142-2fffd6f42a44-logs\") on node \"crc\" DevicePath \"\""
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.601087 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bm549\" (UniqueName: \"kubernetes.io/projected/2dc8f562-ab16-4028-8142-2fffd6f42a44-kube-api-access-bm549\") on node \"crc\" DevicePath \"\""
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.601464 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dc8f562-ab16-4028-8142-2fffd6f42a44-config-data\") on node \"crc\" DevicePath \"\""
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.601475 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dc8f562-ab16-4028-8142-2fffd6f42a44-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.620582 4703 generic.go:334] "Generic (PLEG): container finished" podID="2dc8f562-ab16-4028-8142-2fffd6f42a44" containerID="faf8f61aa0c3ef41ad8c69cdb0bafbf5c6b050feb8ead1b4586859fa23ba06f0" exitCode=0
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.621637 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.621660 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2dc8f562-ab16-4028-8142-2fffd6f42a44","Type":"ContainerDied","Data":"faf8f61aa0c3ef41ad8c69cdb0bafbf5c6b050feb8ead1b4586859fa23ba06f0"}
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.621781 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2dc8f562-ab16-4028-8142-2fffd6f42a44","Type":"ContainerDied","Data":"f1eb7206036f73e753b374d7145226b8e85003a630c1c02d6126b47f3f610a5c"}
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.621810 4703 scope.go:117] "RemoveContainer" containerID="faf8f61aa0c3ef41ad8c69cdb0bafbf5c6b050feb8ead1b4586859fa23ba06f0"
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.628849 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dc8f562-ab16-4028-8142-2fffd6f42a44-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "2dc8f562-ab16-4028-8142-2fffd6f42a44" (UID: "2dc8f562-ab16-4028-8142-2fffd6f42a44"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.678112 4703 scope.go:117] "RemoveContainer" containerID="cd80b84024cdb7a627f658bae556313655a2cbc87f9966eed164ea09bb45da19"
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.703587 4703 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2dc8f562-ab16-4028-8142-2fffd6f42a44-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.708098 4703 scope.go:117] "RemoveContainer" containerID="faf8f61aa0c3ef41ad8c69cdb0bafbf5c6b050feb8ead1b4586859fa23ba06f0"
Jan 30 12:24:28 crc kubenswrapper[4703]: E0130 12:24:28.708638 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"faf8f61aa0c3ef41ad8c69cdb0bafbf5c6b050feb8ead1b4586859fa23ba06f0\": container with ID starting with faf8f61aa0c3ef41ad8c69cdb0bafbf5c6b050feb8ead1b4586859fa23ba06f0 not found: ID does not exist" containerID="faf8f61aa0c3ef41ad8c69cdb0bafbf5c6b050feb8ead1b4586859fa23ba06f0"
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.708670 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"faf8f61aa0c3ef41ad8c69cdb0bafbf5c6b050feb8ead1b4586859fa23ba06f0"} err="failed to get container status \"faf8f61aa0c3ef41ad8c69cdb0bafbf5c6b050feb8ead1b4586859fa23ba06f0\": rpc error: code = NotFound desc = could not find container \"faf8f61aa0c3ef41ad8c69cdb0bafbf5c6b050feb8ead1b4586859fa23ba06f0\": container with ID starting with faf8f61aa0c3ef41ad8c69cdb0bafbf5c6b050feb8ead1b4586859fa23ba06f0 not found: ID does not exist"
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.708698 4703 scope.go:117] "RemoveContainer" containerID="cd80b84024cdb7a627f658bae556313655a2cbc87f9966eed164ea09bb45da19"
Jan 30 12:24:28 crc kubenswrapper[4703]: E0130 12:24:28.709206 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd80b84024cdb7a627f658bae556313655a2cbc87f9966eed164ea09bb45da19\": container with ID starting with cd80b84024cdb7a627f658bae556313655a2cbc87f9966eed164ea09bb45da19 not found: ID does not exist" containerID="cd80b84024cdb7a627f658bae556313655a2cbc87f9966eed164ea09bb45da19"
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.709241 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd80b84024cdb7a627f658bae556313655a2cbc87f9966eed164ea09bb45da19"} err="failed to get container status \"cd80b84024cdb7a627f658bae556313655a2cbc87f9966eed164ea09bb45da19\": rpc error: code = NotFound desc = could not find container \"cd80b84024cdb7a627f658bae556313655a2cbc87f9966eed164ea09bb45da19\": container with ID starting with cd80b84024cdb7a627f658bae556313655a2cbc87f9966eed164ea09bb45da19 not found: ID does not exist"
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.966402 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 30 12:24:28 crc kubenswrapper[4703]: I0130 12:24:28.982958 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.002473 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Jan 30 12:24:29 crc kubenswrapper[4703]: E0130 12:24:29.003224 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="620650ab-34d8-4489-b809-a799a1e64c5c" containerName="extract-content"
Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.003254 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="620650ab-34d8-4489-b809-a799a1e64c5c" containerName="extract-content"
Jan 30 12:24:29 crc kubenswrapper[4703]: E0130 12:24:29.003272 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="620650ab-34d8-4489-b809-a799a1e64c5c" containerName="extract-utilities"
Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.003282 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="620650ab-34d8-4489-b809-a799a1e64c5c" containerName="extract-utilities"
Jan 30 12:24:29 crc kubenswrapper[4703]: E0130 12:24:29.003319 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dc8f562-ab16-4028-8142-2fffd6f42a44" containerName="nova-metadata-metadata"
Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.003327 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dc8f562-ab16-4028-8142-2fffd6f42a44" containerName="nova-metadata-metadata"
Jan 30 12:24:29 crc kubenswrapper[4703]: E0130 12:24:29.003341 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dc8f562-ab16-4028-8142-2fffd6f42a44" containerName="nova-metadata-log"
Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.003352 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dc8f562-ab16-4028-8142-2fffd6f42a44" containerName="nova-metadata-log"
Jan 30 12:24:29 crc kubenswrapper[4703]: E0130 12:24:29.003371 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="620650ab-34d8-4489-b809-a799a1e64c5c" containerName="registry-server"
Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.003378 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="620650ab-34d8-4489-b809-a799a1e64c5c" containerName="registry-server"
Jan 30 12:24:29 crc kubenswrapper[4703]: E0130 12:24:29.003397 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04da2786-8d07-4399-9200-95c1f596a97b" containerName="nova-scheduler-scheduler"
Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.003406 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="04da2786-8d07-4399-9200-95c1f596a97b" containerName="nova-scheduler-scheduler"
Jan 
30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.003670 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="04da2786-8d07-4399-9200-95c1f596a97b" containerName="nova-scheduler-scheduler" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.003696 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2dc8f562-ab16-4028-8142-2fffd6f42a44" containerName="nova-metadata-log" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.003712 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="620650ab-34d8-4489-b809-a799a1e64c5c" containerName="registry-server" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.003730 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2dc8f562-ab16-4028-8142-2fffd6f42a44" containerName="nova-metadata-metadata" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.005271 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.011079 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.011370 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.013860 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.105566 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2dc8f562-ab16-4028-8142-2fffd6f42a44" path="/var/lib/kubelet/pods/2dc8f562-ab16-4028-8142-2fffd6f42a44/volumes" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.112792 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmnxv\" (UniqueName: \"kubernetes.io/projected/d4411a18-e909-4a1c-8e14-7c2014546ab6-kube-api-access-qmnxv\") pod \"nova-metadata-0\" (UID: \"d4411a18-e909-4a1c-8e14-7c2014546ab6\") " pod="openstack/nova-metadata-0" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.112855 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4411a18-e909-4a1c-8e14-7c2014546ab6-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d4411a18-e909-4a1c-8e14-7c2014546ab6\") " pod="openstack/nova-metadata-0" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.113011 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4411a18-e909-4a1c-8e14-7c2014546ab6-logs\") pod \"nova-metadata-0\" (UID: \"d4411a18-e909-4a1c-8e14-7c2014546ab6\") " pod="openstack/nova-metadata-0" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.113059 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4411a18-e909-4a1c-8e14-7c2014546ab6-config-data\") pod \"nova-metadata-0\" (UID: \"d4411a18-e909-4a1c-8e14-7c2014546ab6\") " pod="openstack/nova-metadata-0" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.113375 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4411a18-e909-4a1c-8e14-7c2014546ab6-combined-ca-bundle\") 
pod \"nova-metadata-0\" (UID: \"d4411a18-e909-4a1c-8e14-7c2014546ab6\") " pod="openstack/nova-metadata-0" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.215802 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4411a18-e909-4a1c-8e14-7c2014546ab6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d4411a18-e909-4a1c-8e14-7c2014546ab6\") " pod="openstack/nova-metadata-0" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.215931 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmnxv\" (UniqueName: \"kubernetes.io/projected/d4411a18-e909-4a1c-8e14-7c2014546ab6-kube-api-access-qmnxv\") pod \"nova-metadata-0\" (UID: \"d4411a18-e909-4a1c-8e14-7c2014546ab6\") " pod="openstack/nova-metadata-0" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.215956 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4411a18-e909-4a1c-8e14-7c2014546ab6-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d4411a18-e909-4a1c-8e14-7c2014546ab6\") " pod="openstack/nova-metadata-0" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.216022 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4411a18-e909-4a1c-8e14-7c2014546ab6-logs\") pod \"nova-metadata-0\" (UID: \"d4411a18-e909-4a1c-8e14-7c2014546ab6\") " pod="openstack/nova-metadata-0" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.216074 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4411a18-e909-4a1c-8e14-7c2014546ab6-config-data\") pod \"nova-metadata-0\" (UID: \"d4411a18-e909-4a1c-8e14-7c2014546ab6\") " pod="openstack/nova-metadata-0" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.218394 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4411a18-e909-4a1c-8e14-7c2014546ab6-logs\") pod \"nova-metadata-0\" (UID: \"d4411a18-e909-4a1c-8e14-7c2014546ab6\") " pod="openstack/nova-metadata-0" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.222795 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4411a18-e909-4a1c-8e14-7c2014546ab6-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"d4411a18-e909-4a1c-8e14-7c2014546ab6\") " pod="openstack/nova-metadata-0" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.224968 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4411a18-e909-4a1c-8e14-7c2014546ab6-config-data\") pod \"nova-metadata-0\" (UID: \"d4411a18-e909-4a1c-8e14-7c2014546ab6\") " pod="openstack/nova-metadata-0" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.225045 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4411a18-e909-4a1c-8e14-7c2014546ab6-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d4411a18-e909-4a1c-8e14-7c2014546ab6\") " pod="openstack/nova-metadata-0" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.240318 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmnxv\" (UniqueName: 
\"kubernetes.io/projected/d4411a18-e909-4a1c-8e14-7c2014546ab6-kube-api-access-qmnxv\") pod \"nova-metadata-0\" (UID: \"d4411a18-e909-4a1c-8e14-7c2014546ab6\") " pod="openstack/nova-metadata-0" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.378897 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.672167 4703 generic.go:334] "Generic (PLEG): container finished" podID="42b1e803-4494-486f-960f-df4f64759fbf" containerID="a4f0fb423de093ae5f13852bb464e82abcb1d4fdc7972849b71a1e3a03cca89f" exitCode=0 Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.676410 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"42b1e803-4494-486f-960f-df4f64759fbf","Type":"ContainerDied","Data":"a4f0fb423de093ae5f13852bb464e82abcb1d4fdc7972849b71a1e3a03cca89f"} Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.697571 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.735246 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-config-data\") pod \"42b1e803-4494-486f-960f-df4f64759fbf\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.735335 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-combined-ca-bundle\") pod \"42b1e803-4494-486f-960f-df4f64759fbf\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.735364 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-public-tls-certs\") pod \"42b1e803-4494-486f-960f-df4f64759fbf\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.735477 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j69zz\" (UniqueName: \"kubernetes.io/projected/42b1e803-4494-486f-960f-df4f64759fbf-kube-api-access-j69zz\") pod \"42b1e803-4494-486f-960f-df4f64759fbf\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.735550 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42b1e803-4494-486f-960f-df4f64759fbf-logs\") pod \"42b1e803-4494-486f-960f-df4f64759fbf\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.735588 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-internal-tls-certs\") pod \"42b1e803-4494-486f-960f-df4f64759fbf\" (UID: \"42b1e803-4494-486f-960f-df4f64759fbf\") " Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.754924 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/42b1e803-4494-486f-960f-df4f64759fbf-logs" (OuterVolumeSpecName: "logs") pod "42b1e803-4494-486f-960f-df4f64759fbf" (UID: "42b1e803-4494-486f-960f-df4f64759fbf"). 
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.773951 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42b1e803-4494-486f-960f-df4f64759fbf-kube-api-access-j69zz" (OuterVolumeSpecName: "kube-api-access-j69zz") pod "42b1e803-4494-486f-960f-df4f64759fbf" (UID: "42b1e803-4494-486f-960f-df4f64759fbf"). InnerVolumeSpecName "kube-api-access-j69zz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.806102 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "42b1e803-4494-486f-960f-df4f64759fbf" (UID: "42b1e803-4494-486f-960f-df4f64759fbf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.807615 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-config-data" (OuterVolumeSpecName: "config-data") pod "42b1e803-4494-486f-960f-df4f64759fbf" (UID: "42b1e803-4494-486f-960f-df4f64759fbf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.824701 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "42b1e803-4494-486f-960f-df4f64759fbf" (UID: "42b1e803-4494-486f-960f-df4f64759fbf"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.832079 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "42b1e803-4494-486f-960f-df4f64759fbf" (UID: "42b1e803-4494-486f-960f-df4f64759fbf"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.839060 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.839115 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.839156 4703 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.839168 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j69zz\" (UniqueName: \"kubernetes.io/projected/42b1e803-4494-486f-960f-df4f64759fbf-kube-api-access-j69zz\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.839183 4703 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/42b1e803-4494-486f-960f-df4f64759fbf-logs\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.839194 4703 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/42b1e803-4494-486f-960f-df4f64759fbf-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 30 12:24:29 crc kubenswrapper[4703]: I0130 12:24:29.922573 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.703753 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.703749 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"42b1e803-4494-486f-960f-df4f64759fbf","Type":"ContainerDied","Data":"fed30f35551a4abb9d7a5ce4db9aa50094644ffd6c751cd1c81ef219b979953f"} Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.704380 4703 scope.go:117] "RemoveContainer" containerID="a4f0fb423de093ae5f13852bb464e82abcb1d4fdc7972849b71a1e3a03cca89f" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.707408 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d4411a18-e909-4a1c-8e14-7c2014546ab6","Type":"ContainerStarted","Data":"6fcaaf9f0fc9a1e7d7b4b7e77b7e8458360e81ccbe4ddc67d35df3d6bb27b257"} Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.707470 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d4411a18-e909-4a1c-8e14-7c2014546ab6","Type":"ContainerStarted","Data":"bbe65af9ca686b0f1fd3d405d7fa3e5ed6eec238e06c79059629ba7834f7a822"} Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.707479 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d4411a18-e909-4a1c-8e14-7c2014546ab6","Type":"ContainerStarted","Data":"d2f7fb651df24725fe8ea9f30ac565cde4307320c5d91830eff918ac0f0aa72d"} Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.714234 4703 generic.go:334] "Generic (PLEG): container finished" podID="2fc19a6b-3cde-4bb5-9499-f5be846289da" containerID="379e1db067847a9ebadf07796b4d79647d8e13d36181fe0778827bfae57d7bff" exitCode=1 Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.714310 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerDied","Data":"379e1db067847a9ebadf07796b4d79647d8e13d36181fe0778827bfae57d7bff"} Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.714998 4703 scope.go:117] "RemoveContainer" containerID="379e1db067847a9ebadf07796b4d79647d8e13d36181fe0778827bfae57d7bff" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.755218 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.755190161 podStartE2EDuration="2.755190161s" podCreationTimestamp="2026-01-30 12:24:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:24:30.735262973 +0000 UTC m=+1706.513084627" watchObservedRunningTime="2026-01-30 12:24:30.755190161 +0000 UTC m=+1706.533011815" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.778014 4703 scope.go:117] "RemoveContainer" containerID="08c55054ffe029f8a1bc7051b49066107eb51efa3418370d09cd88cb4a1e533d" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.789591 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.813637 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.826367 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 30 12:24:30 crc kubenswrapper[4703]: E0130 12:24:30.827084 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42b1e803-4494-486f-960f-df4f64759fbf" containerName="nova-api-api" Jan 30 12:24:30 
crc kubenswrapper[4703]: I0130 12:24:30.827103 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="42b1e803-4494-486f-960f-df4f64759fbf" containerName="nova-api-api" Jan 30 12:24:30 crc kubenswrapper[4703]: E0130 12:24:30.827164 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42b1e803-4494-486f-960f-df4f64759fbf" containerName="nova-api-log" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.827174 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="42b1e803-4494-486f-960f-df4f64759fbf" containerName="nova-api-log" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.827427 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="42b1e803-4494-486f-960f-df4f64759fbf" containerName="nova-api-log" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.827442 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="42b1e803-4494-486f-960f-df4f64759fbf" containerName="nova-api-api" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.829039 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.836559 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.836777 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.839491 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.840194 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.872198 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d551d53-088a-4087-9a64-f26c936a32fe-internal-tls-certs\") pod \"nova-api-0\" (UID: \"5d551d53-088a-4087-9a64-f26c936a32fe\") " pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.872329 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d551d53-088a-4087-9a64-f26c936a32fe-public-tls-certs\") pod \"nova-api-0\" (UID: \"5d551d53-088a-4087-9a64-f26c936a32fe\") " pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.872376 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d551d53-088a-4087-9a64-f26c936a32fe-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5d551d53-088a-4087-9a64-f26c936a32fe\") " pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.872416 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9lcsk\" (UniqueName: \"kubernetes.io/projected/5d551d53-088a-4087-9a64-f26c936a32fe-kube-api-access-9lcsk\") pod \"nova-api-0\" (UID: \"5d551d53-088a-4087-9a64-f26c936a32fe\") " pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.872480 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d551d53-088a-4087-9a64-f26c936a32fe-logs\") pod 
\"nova-api-0\" (UID: \"5d551d53-088a-4087-9a64-f26c936a32fe\") " pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.872550 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d551d53-088a-4087-9a64-f26c936a32fe-config-data\") pod \"nova-api-0\" (UID: \"5d551d53-088a-4087-9a64-f26c936a32fe\") " pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.974405 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d551d53-088a-4087-9a64-f26c936a32fe-logs\") pod \"nova-api-0\" (UID: \"5d551d53-088a-4087-9a64-f26c936a32fe\") " pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.974502 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d551d53-088a-4087-9a64-f26c936a32fe-config-data\") pod \"nova-api-0\" (UID: \"5d551d53-088a-4087-9a64-f26c936a32fe\") " pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.974559 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d551d53-088a-4087-9a64-f26c936a32fe-internal-tls-certs\") pod \"nova-api-0\" (UID: \"5d551d53-088a-4087-9a64-f26c936a32fe\") " pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.974636 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d551d53-088a-4087-9a64-f26c936a32fe-public-tls-certs\") pod \"nova-api-0\" (UID: \"5d551d53-088a-4087-9a64-f26c936a32fe\") " pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.974669 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d551d53-088a-4087-9a64-f26c936a32fe-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5d551d53-088a-4087-9a64-f26c936a32fe\") " pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.974703 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9lcsk\" (UniqueName: \"kubernetes.io/projected/5d551d53-088a-4087-9a64-f26c936a32fe-kube-api-access-9lcsk\") pod \"nova-api-0\" (UID: \"5d551d53-088a-4087-9a64-f26c936a32fe\") " pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.975761 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5d551d53-088a-4087-9a64-f26c936a32fe-logs\") pod \"nova-api-0\" (UID: \"5d551d53-088a-4087-9a64-f26c936a32fe\") " pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.982375 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d551d53-088a-4087-9a64-f26c936a32fe-internal-tls-certs\") pod \"nova-api-0\" (UID: \"5d551d53-088a-4087-9a64-f26c936a32fe\") " pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.982453 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d551d53-088a-4087-9a64-f26c936a32fe-combined-ca-bundle\") pod \"nova-api-0\" (UID: 
\"5d551d53-088a-4087-9a64-f26c936a32fe\") " pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.985937 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d551d53-088a-4087-9a64-f26c936a32fe-config-data\") pod \"nova-api-0\" (UID: \"5d551d53-088a-4087-9a64-f26c936a32fe\") " pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.987579 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d551d53-088a-4087-9a64-f26c936a32fe-public-tls-certs\") pod \"nova-api-0\" (UID: \"5d551d53-088a-4087-9a64-f26c936a32fe\") " pod="openstack/nova-api-0" Jan 30 12:24:30 crc kubenswrapper[4703]: I0130 12:24:30.994933 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9lcsk\" (UniqueName: \"kubernetes.io/projected/5d551d53-088a-4087-9a64-f26c936a32fe-kube-api-access-9lcsk\") pod \"nova-api-0\" (UID: \"5d551d53-088a-4087-9a64-f26c936a32fe\") " pod="openstack/nova-api-0" Jan 30 12:24:31 crc kubenswrapper[4703]: I0130 12:24:31.035558 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 30 12:24:31 crc kubenswrapper[4703]: I0130 12:24:31.102604 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42b1e803-4494-486f-960f-df4f64759fbf" path="/var/lib/kubelet/pods/42b1e803-4494-486f-960f-df4f64759fbf/volumes" Jan 30 12:24:31 crc kubenswrapper[4703]: I0130 12:24:31.214853 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 30 12:24:31 crc kubenswrapper[4703]: I0130 12:24:31.731627 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerStarted","Data":"78058cafa7cc98b6120713b045991f694ac35074c6dc801ad58aa79adade75f5"} Jan 30 12:24:31 crc kubenswrapper[4703]: I0130 12:24:31.750818 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 30 12:24:32 crc kubenswrapper[4703]: I0130 12:24:32.745948 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5d551d53-088a-4087-9a64-f26c936a32fe","Type":"ContainerStarted","Data":"b6fcf12193845bc3d5012dca76a1353705da2e5bc44409ae3338d679eea2a061"} Jan 30 12:24:32 crc kubenswrapper[4703]: I0130 12:24:32.747095 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5d551d53-088a-4087-9a64-f26c936a32fe","Type":"ContainerStarted","Data":"7ddd272291bd9ae9bbcd94291ba6e8d7a45523433d86cbf5be606f1f5e83c025"} Jan 30 12:24:32 crc kubenswrapper[4703]: I0130 12:24:32.747188 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5d551d53-088a-4087-9a64-f26c936a32fe","Type":"ContainerStarted","Data":"f42a43852cfc7a162928d0e7ecbf7400c92f497a2c07e138fc2fb8a185e4dc0b"} Jan 30 12:24:32 crc kubenswrapper[4703]: I0130 12:24:32.773014 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.772985389 podStartE2EDuration="2.772985389s" podCreationTimestamp="2026-01-30 12:24:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:24:32.763984689 +0000 UTC m=+1708.541806363" watchObservedRunningTime="2026-01-30 
12:24:32.772985389 +0000 UTC m=+1708.550807043" Jan 30 12:24:34 crc kubenswrapper[4703]: I0130 12:24:34.379111 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 30 12:24:34 crc kubenswrapper[4703]: I0130 12:24:34.379671 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 30 12:24:34 crc kubenswrapper[4703]: I0130 12:24:34.775806 4703 generic.go:334] "Generic (PLEG): container finished" podID="2fc19a6b-3cde-4bb5-9499-f5be846289da" containerID="78058cafa7cc98b6120713b045991f694ac35074c6dc801ad58aa79adade75f5" exitCode=1 Jan 30 12:24:34 crc kubenswrapper[4703]: I0130 12:24:34.777351 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerDied","Data":"78058cafa7cc98b6120713b045991f694ac35074c6dc801ad58aa79adade75f5"} Jan 30 12:24:34 crc kubenswrapper[4703]: I0130 12:24:34.777416 4703 scope.go:117] "RemoveContainer" containerID="379e1db067847a9ebadf07796b4d79647d8e13d36181fe0778827bfae57d7bff" Jan 30 12:24:34 crc kubenswrapper[4703]: I0130 12:24:34.777985 4703 scope.go:117] "RemoveContainer" containerID="78058cafa7cc98b6120713b045991f694ac35074c6dc801ad58aa79adade75f5" Jan 30 12:24:34 crc kubenswrapper[4703]: E0130 12:24:34.778240 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 10s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:24:36 crc kubenswrapper[4703]: I0130 12:24:36.035257 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 30 12:24:36 crc kubenswrapper[4703]: I0130 12:24:36.036433 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:24:36 crc kubenswrapper[4703]: I0130 12:24:36.036449 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:24:36 crc kubenswrapper[4703]: I0130 12:24:36.036460 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:24:36 crc kubenswrapper[4703]: I0130 12:24:36.037616 4703 scope.go:117] "RemoveContainer" containerID="78058cafa7cc98b6120713b045991f694ac35074c6dc801ad58aa79adade75f5" Jan 30 12:24:36 crc kubenswrapper[4703]: E0130 12:24:36.037982 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 10s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:24:39 crc kubenswrapper[4703]: I0130 12:24:39.379666 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 30 12:24:39 crc kubenswrapper[4703]: I0130 12:24:39.381230 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 30 12:24:40 crc kubenswrapper[4703]: I0130 12:24:40.397299 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="d4411a18-e909-4a1c-8e14-7c2014546ab6" 
containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.226:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 30 12:24:40 crc kubenswrapper[4703]: I0130 12:24:40.397324 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="d4411a18-e909-4a1c-8e14-7c2014546ab6" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.226:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 30 12:24:41 crc kubenswrapper[4703]: I0130 12:24:41.217458 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 30 12:24:41 crc kubenswrapper[4703]: I0130 12:24:41.218001 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 30 12:24:42 crc kubenswrapper[4703]: I0130 12:24:42.301326 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5d551d53-088a-4087-9a64-f26c936a32fe" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.227:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 30 12:24:42 crc kubenswrapper[4703]: I0130 12:24:42.301353 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5d551d53-088a-4087-9a64-f26c936a32fe" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.227:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 30 12:24:42 crc kubenswrapper[4703]: I0130 12:24:42.824903 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:24:42 crc kubenswrapper[4703]: I0130 12:24:42.825504 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:24:42 crc kubenswrapper[4703]: I0130 12:24:42.825678 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 12:24:42 crc kubenswrapper[4703]: I0130 12:24:42.827146 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127"} pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 12:24:42 crc kubenswrapper[4703]: I0130 12:24:42.827398 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" containerID="cri-o://3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" gracePeriod=600 Jan 30 12:24:42 crc kubenswrapper[4703]: I0130 12:24:42.876674 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/ceilometer-0" Jan 30 12:24:42 crc kubenswrapper[4703]: E0130 12:24:42.966679 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:24:43 crc kubenswrapper[4703]: I0130 12:24:43.915242 4703 generic.go:334] "Generic (PLEG): container finished" podID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" exitCode=0 Jan 30 12:24:43 crc kubenswrapper[4703]: I0130 12:24:43.915315 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerDied","Data":"3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127"} Jan 30 12:24:43 crc kubenswrapper[4703]: I0130 12:24:43.916578 4703 scope.go:117] "RemoveContainer" containerID="ae09ea0f762d55711d4ebe52875d9283498e826f1ea02651fb958e545587bc81" Jan 30 12:24:43 crc kubenswrapper[4703]: I0130 12:24:43.917607 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:24:43 crc kubenswrapper[4703]: E0130 12:24:43.918100 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:24:49 crc kubenswrapper[4703]: I0130 12:24:49.087783 4703 scope.go:117] "RemoveContainer" containerID="78058cafa7cc98b6120713b045991f694ac35074c6dc801ad58aa79adade75f5" Jan 30 12:24:49 crc kubenswrapper[4703]: I0130 12:24:49.522466 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 30 12:24:49 crc kubenswrapper[4703]: I0130 12:24:49.523095 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 30 12:24:49 crc kubenswrapper[4703]: I0130 12:24:49.533525 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 30 12:24:49 crc kubenswrapper[4703]: I0130 12:24:49.534444 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 30 12:24:49 crc kubenswrapper[4703]: I0130 12:24:49.992396 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerStarted","Data":"3bfce38e9d96e6b409783611402f29538c98521cc81a7a3a5def494c1449093f"} Jan 30 12:24:50 crc kubenswrapper[4703]: I0130 12:24:50.315373 4703 scope.go:117] "RemoveContainer" containerID="0bd65d40ff36dcdf736ec5c43b3d91c6169e7a13217349bf119e9e118c38285c" Jan 30 12:24:51 crc kubenswrapper[4703]: I0130 12:24:51.035468 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 30 12:24:51 crc kubenswrapper[4703]: I0130 12:24:51.223155 4703 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 30 12:24:51 crc kubenswrapper[4703]: I0130 12:24:51.224238 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 30 12:24:51 crc kubenswrapper[4703]: I0130 12:24:51.224376 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 30 12:24:51 crc kubenswrapper[4703]: I0130 12:24:51.235164 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 30 12:24:52 crc kubenswrapper[4703]: I0130 12:24:52.014130 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 30 12:24:52 crc kubenswrapper[4703]: I0130 12:24:52.024938 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 30 12:24:53 crc kubenswrapper[4703]: I0130 12:24:53.027798 4703 generic.go:334] "Generic (PLEG): container finished" podID="2fc19a6b-3cde-4bb5-9499-f5be846289da" containerID="3bfce38e9d96e6b409783611402f29538c98521cc81a7a3a5def494c1449093f" exitCode=1 Jan 30 12:24:53 crc kubenswrapper[4703]: I0130 12:24:53.027860 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerDied","Data":"3bfce38e9d96e6b409783611402f29538c98521cc81a7a3a5def494c1449093f"} Jan 30 12:24:53 crc kubenswrapper[4703]: I0130 12:24:53.028386 4703 scope.go:117] "RemoveContainer" containerID="78058cafa7cc98b6120713b045991f694ac35074c6dc801ad58aa79adade75f5" Jan 30 12:24:53 crc kubenswrapper[4703]: I0130 12:24:53.029764 4703 scope.go:117] "RemoveContainer" containerID="3bfce38e9d96e6b409783611402f29538c98521cc81a7a3a5def494c1449093f" Jan 30 12:24:53 crc kubenswrapper[4703]: E0130 12:24:53.030199 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 20s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:24:56 crc kubenswrapper[4703]: I0130 12:24:56.035369 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:24:56 crc kubenswrapper[4703]: I0130 12:24:56.035739 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:24:56 crc kubenswrapper[4703]: I0130 12:24:56.035751 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:24:56 crc kubenswrapper[4703]: I0130 12:24:56.036935 4703 scope.go:117] "RemoveContainer" containerID="3bfce38e9d96e6b409783611402f29538c98521cc81a7a3a5def494c1449093f" Jan 30 12:24:56 crc kubenswrapper[4703]: E0130 12:24:56.037286 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 20s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:24:58 crc kubenswrapper[4703]: I0130 12:24:58.087161 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:24:58 crc 
kubenswrapper[4703]: E0130 12:24:58.087980 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:25:09 crc kubenswrapper[4703]: I0130 12:25:09.087186 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:25:09 crc kubenswrapper[4703]: E0130 12:25:09.088587 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:25:10 crc kubenswrapper[4703]: I0130 12:25:10.087456 4703 scope.go:117] "RemoveContainer" containerID="3bfce38e9d96e6b409783611402f29538c98521cc81a7a3a5def494c1449093f" Jan 30 12:25:10 crc kubenswrapper[4703]: E0130 12:25:10.087993 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 20s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:25:20 crc kubenswrapper[4703]: I0130 12:25:20.110788 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:25:20 crc kubenswrapper[4703]: E0130 12:25:20.111765 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:25:24 crc kubenswrapper[4703]: I0130 12:25:24.087332 4703 scope.go:117] "RemoveContainer" containerID="3bfce38e9d96e6b409783611402f29538c98521cc81a7a3a5def494c1449093f" Jan 30 12:25:24 crc kubenswrapper[4703]: I0130 12:25:24.413533 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerStarted","Data":"b0524fb3e1551d249a0fbd6dc660597b23942b6881789d881cfbc7b65d3d1252"} Jan 30 12:25:26 crc kubenswrapper[4703]: I0130 12:25:26.035082 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 30 12:25:26 crc kubenswrapper[4703]: I0130 12:25:26.035631 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:25:26 crc kubenswrapper[4703]: I0130 12:25:26.070522 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 30 12:25:26 crc kubenswrapper[4703]: I0130 12:25:26.463037 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/nova-scheduler-0" Jan 30 12:25:27 crc kubenswrapper[4703]: I0130 12:25:27.449156 4703 generic.go:334] "Generic (PLEG): container finished" podID="2fc19a6b-3cde-4bb5-9499-f5be846289da" containerID="b0524fb3e1551d249a0fbd6dc660597b23942b6881789d881cfbc7b65d3d1252" exitCode=1 Jan 30 12:25:27 crc kubenswrapper[4703]: I0130 12:25:27.449242 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerDied","Data":"b0524fb3e1551d249a0fbd6dc660597b23942b6881789d881cfbc7b65d3d1252"} Jan 30 12:25:27 crc kubenswrapper[4703]: I0130 12:25:27.449834 4703 scope.go:117] "RemoveContainer" containerID="3bfce38e9d96e6b409783611402f29538c98521cc81a7a3a5def494c1449093f" Jan 30 12:25:27 crc kubenswrapper[4703]: I0130 12:25:27.450760 4703 scope.go:117] "RemoveContainer" containerID="b0524fb3e1551d249a0fbd6dc660597b23942b6881789d881cfbc7b65d3d1252" Jan 30 12:25:27 crc kubenswrapper[4703]: E0130 12:25:27.452189 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:25:27 crc kubenswrapper[4703]: E0130 12:25:27.676474 4703 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fc19a6b_3cde_4bb5_9499_f5be846289da.slice/crio-conmon-b0524fb3e1551d249a0fbd6dc660597b23942b6881789d881cfbc7b65d3d1252.scope\": RecentStats: unable to find data in memory cache]" Jan 30 12:25:28 crc kubenswrapper[4703]: I0130 12:25:28.463595 4703 scope.go:117] "RemoveContainer" containerID="b0524fb3e1551d249a0fbd6dc660597b23942b6881789d881cfbc7b65d3d1252" Jan 30 12:25:28 crc kubenswrapper[4703]: E0130 12:25:28.464037 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:25:31 crc kubenswrapper[4703]: I0130 12:25:31.035432 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 30 12:25:31 crc kubenswrapper[4703]: I0130 12:25:31.037638 4703 scope.go:117] "RemoveContainer" containerID="b0524fb3e1551d249a0fbd6dc660597b23942b6881789d881cfbc7b65d3d1252" Jan 30 12:25:31 crc kubenswrapper[4703]: E0130 12:25:31.038027 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:25:31 crc kubenswrapper[4703]: I0130 12:25:31.087713 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:25:31 crc kubenswrapper[4703]: E0130 12:25:31.088090 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:25:35 crc kubenswrapper[4703]: I0130 12:25:35.168877 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 30 12:25:36 crc kubenswrapper[4703]: I0130 12:25:36.035445 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:25:36 crc kubenswrapper[4703]: I0130 12:25:36.036004 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:25:36 crc kubenswrapper[4703]: I0130 12:25:36.037083 4703 scope.go:117] "RemoveContainer" containerID="b0524fb3e1551d249a0fbd6dc660597b23942b6881789d881cfbc7b65d3d1252" Jan 30 12:25:36 crc kubenswrapper[4703]: E0130 12:25:36.037605 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:25:36 crc kubenswrapper[4703]: I0130 12:25:36.092949 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 30 12:25:40 crc kubenswrapper[4703]: I0130 12:25:40.610549 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" containerName="rabbitmq" containerID="cri-o://eef3e6e64c855e27b5c3120003637e4d0d0bb8bb8fb57989469aeb4a3bea0a85" gracePeriod=604795 Jan 30 12:25:41 crc kubenswrapper[4703]: I0130 12:25:41.469060 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" containerName="rabbitmq" containerID="cri-o://1336b0d45f54ef019a4efeeffadf12d12099f883fab274d1069b9b8e1746c41d" gracePeriod=604795 Jan 30 12:25:42 crc kubenswrapper[4703]: I0130 12:25:42.215828 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused" Jan 30 12:25:42 crc kubenswrapper[4703]: I0130 12:25:42.299916 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused" Jan 30 12:25:44 crc kubenswrapper[4703]: I0130 12:25:44.087117 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:25:44 crc kubenswrapper[4703]: E0130 12:25:44.088057 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" 
podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.338717 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.432790 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-confd\") pod \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.433187 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-tls\") pod \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.433272 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-erlang-cookie-secret\") pod \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.433383 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-config-data\") pod \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.433450 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-erlang-cookie\") pod \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.433515 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-server-conf\") pod \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.433573 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-plugins\") pod \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.433630 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.433657 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-plugins-conf\") pod \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.433722 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-gbd8j\" (UniqueName: \"kubernetes.io/projected/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-kube-api-access-gbd8j\") pod \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.433755 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-pod-info\") pod \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\" (UID: \"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5\") " Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.436672 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" (UID: "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.436958 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" (UID: "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.437407 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" (UID: "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.447145 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" (UID: "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.447274 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" (UID: "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.448655 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-pod-info" (OuterVolumeSpecName: "pod-info") pod "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" (UID: "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.453426 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "persistence") pod "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" (UID: "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5"). 
InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.460376 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-kube-api-access-gbd8j" (OuterVolumeSpecName: "kube-api-access-gbd8j") pod "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" (UID: "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5"). InnerVolumeSpecName "kube-api-access-gbd8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.483948 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-config-data" (OuterVolumeSpecName: "config-data") pod "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" (UID: "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.531591 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-server-conf" (OuterVolumeSpecName: "server-conf") pod "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" (UID: "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.549512 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.549591 4703 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.549608 4703 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-server-conf\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.549649 4703 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.549692 4703 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.549736 4703 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.549747 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gbd8j\" (UniqueName: \"kubernetes.io/projected/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-kube-api-access-gbd8j\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.549757 4703 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-pod-info\") on node \"crc\" 
DevicePath \"\"" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.549765 4703 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.549774 4703 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.613156 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" (UID: "0a06c5c6-2219-4e79-ae66-2d706ce1e8e5"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.656076 4703 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.656294 4703 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.656337 4703 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.697207 4703 generic.go:334] "Generic (PLEG): container finished" podID="0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" containerID="eef3e6e64c855e27b5c3120003637e4d0d0bb8bb8fb57989469aeb4a3bea0a85" exitCode=0 Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.697281 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5","Type":"ContainerDied","Data":"eef3e6e64c855e27b5c3120003637e4d0d0bb8bb8fb57989469aeb4a3bea0a85"} Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.697347 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.697378 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"0a06c5c6-2219-4e79-ae66-2d706ce1e8e5","Type":"ContainerDied","Data":"3da7eae58bf75f8f1addba1b20a1635775b798409eb0a38b061073326c19ab4c"} Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.697409 4703 scope.go:117] "RemoveContainer" containerID="eef3e6e64c855e27b5c3120003637e4d0d0bb8bb8fb57989469aeb4a3bea0a85" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.709055 4703 generic.go:334] "Generic (PLEG): container finished" podID="aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" containerID="1336b0d45f54ef019a4efeeffadf12d12099f883fab274d1069b9b8e1746c41d" exitCode=0 Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.709115 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b","Type":"ContainerDied","Data":"1336b0d45f54ef019a4efeeffadf12d12099f883fab274d1069b9b8e1746c41d"} Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.758482 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.758535 4703 scope.go:117] "RemoveContainer" containerID="481cd73606bf3baac9bc19db3659ddfb18cc1af4e8c3a636d9694a00c056d5b7" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.773701 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.785821 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 30 12:25:47 crc kubenswrapper[4703]: E0130 12:25:47.786812 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" containerName="setup-container" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.786950 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" containerName="setup-container" Jan 30 12:25:47 crc kubenswrapper[4703]: E0130 12:25:47.787042 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" containerName="rabbitmq" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.787150 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" containerName="rabbitmq" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.787498 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" containerName="rabbitmq" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.788995 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.792253 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.795366 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-tpbrp" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.795530 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.795687 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.795763 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.795868 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.801873 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.810995 4703 scope.go:117] "RemoveContainer" containerID="eef3e6e64c855e27b5c3120003637e4d0d0bb8bb8fb57989469aeb4a3bea0a85" Jan 30 12:25:47 crc kubenswrapper[4703]: E0130 12:25:47.813275 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eef3e6e64c855e27b5c3120003637e4d0d0bb8bb8fb57989469aeb4a3bea0a85\": container with ID starting with eef3e6e64c855e27b5c3120003637e4d0d0bb8bb8fb57989469aeb4a3bea0a85 not found: ID does not exist" containerID="eef3e6e64c855e27b5c3120003637e4d0d0bb8bb8fb57989469aeb4a3bea0a85" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.813408 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eef3e6e64c855e27b5c3120003637e4d0d0bb8bb8fb57989469aeb4a3bea0a85"} err="failed to get container status \"eef3e6e64c855e27b5c3120003637e4d0d0bb8bb8fb57989469aeb4a3bea0a85\": rpc error: code = NotFound desc = could not find container \"eef3e6e64c855e27b5c3120003637e4d0d0bb8bb8fb57989469aeb4a3bea0a85\": container with ID starting with eef3e6e64c855e27b5c3120003637e4d0d0bb8bb8fb57989469aeb4a3bea0a85 not found: ID does not exist" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.813503 4703 scope.go:117] "RemoveContainer" containerID="481cd73606bf3baac9bc19db3659ddfb18cc1af4e8c3a636d9694a00c056d5b7" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.813796 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 30 12:25:47 crc kubenswrapper[4703]: E0130 12:25:47.814183 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"481cd73606bf3baac9bc19db3659ddfb18cc1af4e8c3a636d9694a00c056d5b7\": container with ID starting with 481cd73606bf3baac9bc19db3659ddfb18cc1af4e8c3a636d9694a00c056d5b7 not found: ID does not exist" containerID="481cd73606bf3baac9bc19db3659ddfb18cc1af4e8c3a636d9694a00c056d5b7" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.814223 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"481cd73606bf3baac9bc19db3659ddfb18cc1af4e8c3a636d9694a00c056d5b7"} err="failed to get container status 
\"481cd73606bf3baac9bc19db3659ddfb18cc1af4e8c3a636d9694a00c056d5b7\": rpc error: code = NotFound desc = could not find container \"481cd73606bf3baac9bc19db3659ddfb18cc1af4e8c3a636d9694a00c056d5b7\": container with ID starting with 481cd73606bf3baac9bc19db3659ddfb18cc1af4e8c3a636d9694a00c056d5b7 not found: ID does not exist" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.861083 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2ec18b52-27e8-4a28-819d-dcb39325cbf7-config-data\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.861182 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2ec18b52-27e8-4a28-819d-dcb39325cbf7-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.861219 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2ec18b52-27e8-4a28-819d-dcb39325cbf7-server-conf\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.861252 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.861293 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2ec18b52-27e8-4a28-819d-dcb39325cbf7-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.861373 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2ec18b52-27e8-4a28-819d-dcb39325cbf7-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.861411 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2ec18b52-27e8-4a28-819d-dcb39325cbf7-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.861449 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2ec18b52-27e8-4a28-819d-dcb39325cbf7-pod-info\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.861466 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2ec18b52-27e8-4a28-819d-dcb39325cbf7-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.861493 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7q47v\" (UniqueName: \"kubernetes.io/projected/2ec18b52-27e8-4a28-819d-dcb39325cbf7-kube-api-access-7q47v\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.861512 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2ec18b52-27e8-4a28-819d-dcb39325cbf7-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.968200 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2ec18b52-27e8-4a28-819d-dcb39325cbf7-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.968297 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2ec18b52-27e8-4a28-819d-dcb39325cbf7-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.968378 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2ec18b52-27e8-4a28-819d-dcb39325cbf7-pod-info\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.968411 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2ec18b52-27e8-4a28-819d-dcb39325cbf7-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.968451 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7q47v\" (UniqueName: \"kubernetes.io/projected/2ec18b52-27e8-4a28-819d-dcb39325cbf7-kube-api-access-7q47v\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.968482 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2ec18b52-27e8-4a28-819d-dcb39325cbf7-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.968570 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2ec18b52-27e8-4a28-819d-dcb39325cbf7-config-data\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " 
pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.968650 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2ec18b52-27e8-4a28-819d-dcb39325cbf7-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.968714 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/2ec18b52-27e8-4a28-819d-dcb39325cbf7-server-conf\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.968763 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.968885 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2ec18b52-27e8-4a28-819d-dcb39325cbf7-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.971566 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/2ec18b52-27e8-4a28-819d-dcb39325cbf7-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.972680 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.984783 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/2ec18b52-27e8-4a28-819d-dcb39325cbf7-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.985007 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/2ec18b52-27e8-4a28-819d-dcb39325cbf7-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.985562 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/2ec18b52-27e8-4a28-819d-dcb39325cbf7-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.986739 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/2ec18b52-27e8-4a28-819d-dcb39325cbf7-server-conf\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.988366 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/2ec18b52-27e8-4a28-819d-dcb39325cbf7-config-data\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:47 crc kubenswrapper[4703]: I0130 12:25:47.990859 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/2ec18b52-27e8-4a28-819d-dcb39325cbf7-pod-info\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:47.999149 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/2ec18b52-27e8-4a28-819d-dcb39325cbf7-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.004538 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7q47v\" (UniqueName: \"kubernetes.io/projected/2ec18b52-27e8-4a28-819d-dcb39325cbf7-kube-api-access-7q47v\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.028010 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/2ec18b52-27e8-4a28-819d-dcb39325cbf7-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.071882 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-server-0\" (UID: \"2ec18b52-27e8-4a28-819d-dcb39325cbf7\") " pod="openstack/rabbitmq-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.125655 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.244313 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.282752 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-erlang-cookie\") pod \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.283190 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n94qd\" (UniqueName: \"kubernetes.io/projected/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-kube-api-access-n94qd\") pod \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.283263 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-config-data\") pod \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.283358 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-server-conf\") pod \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.283450 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-tls\") pod \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.283503 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-plugins\") pod \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.283526 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-pod-info\") pod \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.283563 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-confd\") pod \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.284166 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" (UID: "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.286713 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" (UID: "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.287508 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.287693 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-plugins-conf\") pod \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.287724 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-erlang-cookie-secret\") pod \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\" (UID: \"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b\") " Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.289477 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" (UID: "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.289815 4703 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.290110 4703 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.290149 4703 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.294639 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-kube-api-access-n94qd" (OuterVolumeSpecName: "kube-api-access-n94qd") pod "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" (UID: "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b"). InnerVolumeSpecName "kube-api-access-n94qd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.295834 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" (UID: "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.302185 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" (UID: "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.306207 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "persistence") pod "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" (UID: "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.306450 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-pod-info" (OuterVolumeSpecName: "pod-info") pod "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" (UID: "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.370884 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-config-data" (OuterVolumeSpecName: "config-data") pod "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" (UID: "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.393336 4703 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.393381 4703 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-pod-info\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.393429 4703 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.393442 4703 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.393455 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n94qd\" (UniqueName: \"kubernetes.io/projected/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-kube-api-access-n94qd\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.393467 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.416072 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-server-conf" (OuterVolumeSpecName: "server-conf") pod "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" (UID: "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.432282 4703 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.456427 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" (UID: "aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.495576 4703 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.495616 4703 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.495628 4703 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b-server-conf\") on node \"crc\" DevicePath \"\"" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.724598 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b","Type":"ContainerDied","Data":"204287faf16f8f53f34353c63cc1f9ce62cf869e369fb2750e9284e0786692b2"} Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.725153 4703 scope.go:117] "RemoveContainer" containerID="1336b0d45f54ef019a4efeeffadf12d12099f883fab274d1069b9b8e1746c41d" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.724678 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.771004 4703 scope.go:117] "RemoveContainer" containerID="fa64bfe6d18e3452d8e0ab152d7a52f728f181e2221def319dc3e38ef1880ae1" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.779222 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.789273 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.807761 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.818470 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 30 12:25:48 crc kubenswrapper[4703]: E0130 12:25:48.819109 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" containerName="rabbitmq" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.819160 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" containerName="rabbitmq" Jan 30 12:25:48 crc kubenswrapper[4703]: E0130 12:25:48.819224 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" containerName="setup-container" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.819234 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" containerName="setup-container" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.819551 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" containerName="rabbitmq" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.822083 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.827047 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.827421 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.827657 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.827814 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.828031 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-hlnmk" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.828230 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.828414 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.845220 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.906820 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.906889 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.907014 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.907113 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.907212 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.907258 4703 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.907277 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.907387 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.907418 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.907445 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqshf\" (UniqueName: \"kubernetes.io/projected/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-kube-api-access-xqshf\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:48 crc kubenswrapper[4703]: I0130 12:25:48.907487 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.011146 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.011779 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.011825 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.011907 4703 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.011898 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.011949 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.012022 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqshf\" (UniqueName: \"kubernetes.io/projected/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-kube-api-access-xqshf\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.012081 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.012177 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.012210 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.012393 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.012555 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.012746 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-server-conf\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.014264 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.014308 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.014409 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.014433 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.019563 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.023879 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.023945 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.027011 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.041953 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqshf\" (UniqueName: \"kubernetes.io/projected/532d7bbf-dd48-4d8f-ae5e-27a35bb889e1-kube-api-access-xqshf\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.072625 4703 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.110841 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a06c5c6-2219-4e79-ae66-2d706ce1e8e5" path="/var/lib/kubelet/pods/0a06c5c6-2219-4e79-ae66-2d706ce1e8e5/volumes" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.112048 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b" path="/var/lib/kubelet/pods/aedcd6eb-4e31-4c3a-89c1-1e8dc7adc44b/volumes" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.316224 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.757539 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"2ec18b52-27e8-4a28-819d-dcb39325cbf7","Type":"ContainerStarted","Data":"5620e90b905dd637f94ee6607a4ecc2b1792cf3607a9279dfb2708ecdb4317e7"} Jan 30 12:25:49 crc kubenswrapper[4703]: I0130 12:25:49.902387 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 30 12:25:49 crc kubenswrapper[4703]: W0130 12:25:49.902568 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod532d7bbf_dd48_4d8f_ae5e_27a35bb889e1.slice/crio-673309d0113a9520fa6ac50992fe328d71c3390399cf75d12d7f9dab8df3eb68 WatchSource:0}: Error finding container 673309d0113a9520fa6ac50992fe328d71c3390399cf75d12d7f9dab8df3eb68: Status 404 returned error can't find the container with id 673309d0113a9520fa6ac50992fe328d71c3390399cf75d12d7f9dab8df3eb68 Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.090003 4703 scope.go:117] "RemoveContainer" containerID="b0524fb3e1551d249a0fbd6dc660597b23942b6881789d881cfbc7b65d3d1252" Jan 30 12:25:50 crc kubenswrapper[4703]: E0130 12:25:50.091329 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.210584 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-t46r9"] Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.213406 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.216601 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.231279 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-t46r9"] Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.390681 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-ovsdbserver-nb\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.390768 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlrwl\" (UniqueName: \"kubernetes.io/projected/218366cb-8368-428f-abdf-4576b594b953-kube-api-access-jlrwl\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.391037 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-dns-svc\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.391152 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-dns-swift-storage-0\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.391287 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-config\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.391390 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-openstack-edpm-ipam\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.391428 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-ovsdbserver-sb\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.494341 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-config\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: 
\"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.494505 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-openstack-edpm-ipam\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.494531 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-ovsdbserver-sb\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.494633 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-ovsdbserver-nb\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.494666 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlrwl\" (UniqueName: \"kubernetes.io/projected/218366cb-8368-428f-abdf-4576b594b953-kube-api-access-jlrwl\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.494733 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-dns-svc\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.494782 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-dns-swift-storage-0\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.495677 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-config\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.495729 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-ovsdbserver-sb\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.496309 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-ovsdbserver-nb\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " 
pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.496395 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-dns-swift-storage-0\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.497298 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-openstack-edpm-ipam\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.499622 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-dns-svc\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.600248 4703 scope.go:117] "RemoveContainer" containerID="4724661b321f6ce5f753731575ce55e816165e7242955e0a3f3e31646fd199b6" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.634026 4703 scope.go:117] "RemoveContainer" containerID="5d4de2c96495b8e480e0ca04a0935c3e72b6eaf5f09e6e682c24a701d9048831" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.648650 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlrwl\" (UniqueName: \"kubernetes.io/projected/218366cb-8368-428f-abdf-4576b594b953-kube-api-access-jlrwl\") pod \"dnsmasq-dns-79bd4cc8c9-t46r9\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.678500 4703 scope.go:117] "RemoveContainer" containerID="8fe7b8bd4dc90f3274209a5000c44bb8dbe912ba13e8d4d28647303849c18237" Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.779301 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1","Type":"ContainerStarted","Data":"673309d0113a9520fa6ac50992fe328d71c3390399cf75d12d7f9dab8df3eb68"} Jan 30 12:25:50 crc kubenswrapper[4703]: I0130 12:25:50.845792 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:51 crc kubenswrapper[4703]: I0130 12:25:51.392582 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-t46r9"] Jan 30 12:25:51 crc kubenswrapper[4703]: W0130 12:25:51.561424 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod218366cb_8368_428f_abdf_4576b594b953.slice/crio-c9860dea1d0d066d28de8165881308c006bd5321265dc35e7dbe272ba0f9ba2d WatchSource:0}: Error finding container c9860dea1d0d066d28de8165881308c006bd5321265dc35e7dbe272ba0f9ba2d: Status 404 returned error can't find the container with id c9860dea1d0d066d28de8165881308c006bd5321265dc35e7dbe272ba0f9ba2d Jan 30 12:25:51 crc kubenswrapper[4703]: I0130 12:25:51.809464 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" event={"ID":"218366cb-8368-428f-abdf-4576b594b953","Type":"ContainerStarted","Data":"c9860dea1d0d066d28de8165881308c006bd5321265dc35e7dbe272ba0f9ba2d"} Jan 30 12:25:52 crc kubenswrapper[4703]: I0130 12:25:52.823840 4703 generic.go:334] "Generic (PLEG): container finished" podID="218366cb-8368-428f-abdf-4576b594b953" containerID="ffafe600130d769d39c0a040ad52b5c837daf338203e308982ae92ae8410c8ad" exitCode=0 Jan 30 12:25:52 crc kubenswrapper[4703]: I0130 12:25:52.823913 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" event={"ID":"218366cb-8368-428f-abdf-4576b594b953","Type":"ContainerDied","Data":"ffafe600130d769d39c0a040ad52b5c837daf338203e308982ae92ae8410c8ad"} Jan 30 12:25:52 crc kubenswrapper[4703]: I0130 12:25:52.826310 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1","Type":"ContainerStarted","Data":"ddeeee9c3deb786d72f0b77c914f8bf2d74c18e650bd2dc8606a30543d7ac2a9"} Jan 30 12:25:52 crc kubenswrapper[4703]: I0130 12:25:52.842582 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"2ec18b52-27e8-4a28-819d-dcb39325cbf7","Type":"ContainerStarted","Data":"1f7e2609ee4b95a9f52f3694354edcee584c82f9ce5a2c8c1a1342a754efc0e1"} Jan 30 12:25:53 crc kubenswrapper[4703]: I0130 12:25:53.854430 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" event={"ID":"218366cb-8368-428f-abdf-4576b594b953","Type":"ContainerStarted","Data":"7eacabd31666238c98cabe10f1751a29cae787b6c6f855f18ba5a2e56efc8625"} Jan 30 12:25:53 crc kubenswrapper[4703]: I0130 12:25:53.885076 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" podStartSLOduration=3.885048044 podStartE2EDuration="3.885048044s" podCreationTimestamp="2026-01-30 12:25:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:25:53.876561306 +0000 UTC m=+1789.654382970" watchObservedRunningTime="2026-01-30 12:25:53.885048044 +0000 UTC m=+1789.662869698" Jan 30 12:25:54 crc kubenswrapper[4703]: I0130 12:25:54.864366 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:25:56 crc kubenswrapper[4703]: I0130 12:25:56.090409 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:25:56 crc kubenswrapper[4703]: 
Jan 30 12:26:00 crc kubenswrapper[4703]: I0130 12:26:00.848278 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9"
Jan 30 12:26:00 crc kubenswrapper[4703]: I0130 12:26:00.926261 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-nb8zq"]
Jan 30 12:26:00 crc kubenswrapper[4703]: I0130 12:26:00.926645 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq" podUID="dbda6f00-1625-4d38-9932-de68c1af9bfe" containerName="dnsmasq-dns" containerID="cri-o://3805a13646b6ee14a03c75ce077d134e480717508c8322e4c44d0725a6da73a7" gracePeriod=10
Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.254653 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6cd9bffc9-p5rn7"]
Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.261152 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7"
Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.274366 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cd9bffc9-p5rn7"]
Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.399744 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/50c4f4f0-c935-406c-86a7-791a8dd0e812-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7"
Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.399904 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50c4f4f0-c935-406c-86a7-791a8dd0e812-dns-svc\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7"
Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.399963 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5c45\" (UniqueName: \"kubernetes.io/projected/50c4f4f0-c935-406c-86a7-791a8dd0e812-kube-api-access-w5c45\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7"
Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.400034 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/50c4f4f0-c935-406c-86a7-791a8dd0e812-openstack-edpm-ipam\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7"
Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.400109 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50c4f4f0-c935-406c-86a7-791a8dd0e812-config\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7"
Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.400268 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/50c4f4f0-c935-406c-86a7-791a8dd0e812-dns-swift-storage-0\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7"
Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.400423 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/50c4f4f0-c935-406c-86a7-791a8dd0e812-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7"
Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.503503 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/50c4f4f0-c935-406c-86a7-791a8dd0e812-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7"
Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.503631 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/50c4f4f0-c935-406c-86a7-791a8dd0e812-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7"
Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.503710 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50c4f4f0-c935-406c-86a7-791a8dd0e812-dns-svc\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7"
Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.503741 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5c45\" (UniqueName: \"kubernetes.io/projected/50c4f4f0-c935-406c-86a7-791a8dd0e812-kube-api-access-w5c45\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7"
Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.503815 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/50c4f4f0-c935-406c-86a7-791a8dd0e812-openstack-edpm-ipam\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7"
Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.503844 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50c4f4f0-c935-406c-86a7-791a8dd0e812-config\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7"
Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.503875 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/50c4f4f0-c935-406c-86a7-791a8dd0e812-dns-swift-storage-0\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7"
\"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.505405 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/50c4f4f0-c935-406c-86a7-791a8dd0e812-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.505405 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/50c4f4f0-c935-406c-86a7-791a8dd0e812-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.505405 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/50c4f4f0-c935-406c-86a7-791a8dd0e812-openstack-edpm-ipam\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.505503 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/50c4f4f0-c935-406c-86a7-791a8dd0e812-dns-svc\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.505631 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50c4f4f0-c935-406c-86a7-791a8dd0e812-config\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.506068 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/50c4f4f0-c935-406c-86a7-791a8dd0e812-dns-swift-storage-0\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.533198 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5c45\" (UniqueName: \"kubernetes.io/projected/50c4f4f0-c935-406c-86a7-791a8dd0e812-kube-api-access-w5c45\") pod \"dnsmasq-dns-6cd9bffc9-p5rn7\" (UID: \"50c4f4f0-c935-406c-86a7-791a8dd0e812\") " pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.741020 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.750264 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.853295 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-config\") pod \"dbda6f00-1625-4d38-9932-de68c1af9bfe\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.853518 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kz2nl\" (UniqueName: \"kubernetes.io/projected/dbda6f00-1625-4d38-9932-de68c1af9bfe-kube-api-access-kz2nl\") pod \"dbda6f00-1625-4d38-9932-de68c1af9bfe\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.853718 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-ovsdbserver-nb\") pod \"dbda6f00-1625-4d38-9932-de68c1af9bfe\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.853803 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-dns-swift-storage-0\") pod \"dbda6f00-1625-4d38-9932-de68c1af9bfe\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.853905 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-dns-svc\") pod \"dbda6f00-1625-4d38-9932-de68c1af9bfe\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.853939 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-ovsdbserver-sb\") pod \"dbda6f00-1625-4d38-9932-de68c1af9bfe\" (UID: \"dbda6f00-1625-4d38-9932-de68c1af9bfe\") " Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.868018 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbda6f00-1625-4d38-9932-de68c1af9bfe-kube-api-access-kz2nl" (OuterVolumeSpecName: "kube-api-access-kz2nl") pod "dbda6f00-1625-4d38-9932-de68c1af9bfe" (UID: "dbda6f00-1625-4d38-9932-de68c1af9bfe"). InnerVolumeSpecName "kube-api-access-kz2nl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.939738 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "dbda6f00-1625-4d38-9932-de68c1af9bfe" (UID: "dbda6f00-1625-4d38-9932-de68c1af9bfe"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.940891 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "dbda6f00-1625-4d38-9932-de68c1af9bfe" (UID: "dbda6f00-1625-4d38-9932-de68c1af9bfe"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.948933 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "dbda6f00-1625-4d38-9932-de68c1af9bfe" (UID: "dbda6f00-1625-4d38-9932-de68c1af9bfe"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.953858 4703 generic.go:334] "Generic (PLEG): container finished" podID="dbda6f00-1625-4d38-9932-de68c1af9bfe" containerID="3805a13646b6ee14a03c75ce077d134e480717508c8322e4c44d0725a6da73a7" exitCode=0 Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.953943 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq" event={"ID":"dbda6f00-1625-4d38-9932-de68c1af9bfe","Type":"ContainerDied","Data":"3805a13646b6ee14a03c75ce077d134e480717508c8322e4c44d0725a6da73a7"} Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.954040 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq" event={"ID":"dbda6f00-1625-4d38-9932-de68c1af9bfe","Type":"ContainerDied","Data":"a91db1eccd55fee8ccf9083febf4ccec59adb1bf42612ea1674439fce626965a"} Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.954090 4703 scope.go:117] "RemoveContainer" containerID="3805a13646b6ee14a03c75ce077d134e480717508c8322e4c44d0725a6da73a7" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.954413 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-89c5cd4d5-nb8zq" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.957215 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "dbda6f00-1625-4d38-9932-de68c1af9bfe" (UID: "dbda6f00-1625-4d38-9932-de68c1af9bfe"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.958903 4703 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.958933 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.958943 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kz2nl\" (UniqueName: \"kubernetes.io/projected/dbda6f00-1625-4d38-9932-de68c1af9bfe-kube-api-access-kz2nl\") on node \"crc\" DevicePath \"\"" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.958953 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.958961 4703 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:26:01 crc kubenswrapper[4703]: I0130 12:26:01.965183 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-config" (OuterVolumeSpecName: "config") pod "dbda6f00-1625-4d38-9932-de68c1af9bfe" (UID: "dbda6f00-1625-4d38-9932-de68c1af9bfe"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:26:02 crc kubenswrapper[4703]: I0130 12:26:02.006573 4703 scope.go:117] "RemoveContainer" containerID="113d15d28afeb2077495878e5cb34ba57dd4e6d8d26fe1e8afd018b5b74ac25f" Jan 30 12:26:02 crc kubenswrapper[4703]: I0130 12:26:02.074934 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dbda6f00-1625-4d38-9932-de68c1af9bfe-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:26:02 crc kubenswrapper[4703]: I0130 12:26:02.075183 4703 scope.go:117] "RemoveContainer" containerID="3805a13646b6ee14a03c75ce077d134e480717508c8322e4c44d0725a6da73a7" Jan 30 12:26:02 crc kubenswrapper[4703]: E0130 12:26:02.081055 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3805a13646b6ee14a03c75ce077d134e480717508c8322e4c44d0725a6da73a7\": container with ID starting with 3805a13646b6ee14a03c75ce077d134e480717508c8322e4c44d0725a6da73a7 not found: ID does not exist" containerID="3805a13646b6ee14a03c75ce077d134e480717508c8322e4c44d0725a6da73a7" Jan 30 12:26:02 crc kubenswrapper[4703]: I0130 12:26:02.081147 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3805a13646b6ee14a03c75ce077d134e480717508c8322e4c44d0725a6da73a7"} err="failed to get container status \"3805a13646b6ee14a03c75ce077d134e480717508c8322e4c44d0725a6da73a7\": rpc error: code = NotFound desc = could not find container \"3805a13646b6ee14a03c75ce077d134e480717508c8322e4c44d0725a6da73a7\": container with ID starting with 3805a13646b6ee14a03c75ce077d134e480717508c8322e4c44d0725a6da73a7 not found: ID does not exist" Jan 30 12:26:02 crc kubenswrapper[4703]: I0130 12:26:02.081194 
Jan 30 12:26:02 crc kubenswrapper[4703]: E0130 12:26:02.081535 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"113d15d28afeb2077495878e5cb34ba57dd4e6d8d26fe1e8afd018b5b74ac25f\": container with ID starting with 113d15d28afeb2077495878e5cb34ba57dd4e6d8d26fe1e8afd018b5b74ac25f not found: ID does not exist" containerID="113d15d28afeb2077495878e5cb34ba57dd4e6d8d26fe1e8afd018b5b74ac25f"
Jan 30 12:26:02 crc kubenswrapper[4703]: I0130 12:26:02.081597 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"113d15d28afeb2077495878e5cb34ba57dd4e6d8d26fe1e8afd018b5b74ac25f"} err="failed to get container status \"113d15d28afeb2077495878e5cb34ba57dd4e6d8d26fe1e8afd018b5b74ac25f\": rpc error: code = NotFound desc = could not find container \"113d15d28afeb2077495878e5cb34ba57dd4e6d8d26fe1e8afd018b5b74ac25f\": container with ID starting with 113d15d28afeb2077495878e5cb34ba57dd4e6d8d26fe1e8afd018b5b74ac25f not found: ID does not exist"
Jan 30 12:26:02 crc kubenswrapper[4703]: I0130 12:26:02.297942 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-nb8zq"]
Jan 30 12:26:02 crc kubenswrapper[4703]: I0130 12:26:02.308728 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-89c5cd4d5-nb8zq"]
Jan 30 12:26:02 crc kubenswrapper[4703]: I0130 12:26:02.359852 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cd9bffc9-p5rn7"]
Jan 30 12:26:02 crc kubenswrapper[4703]: I0130 12:26:02.968237 4703 generic.go:334] "Generic (PLEG): container finished" podID="50c4f4f0-c935-406c-86a7-791a8dd0e812" containerID="d4591d94125b553e751ae700bfddc452db8eeb5ba380aa0243a23e396b703011" exitCode=0
Jan 30 12:26:02 crc kubenswrapper[4703]: I0130 12:26:02.968356 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7" event={"ID":"50c4f4f0-c935-406c-86a7-791a8dd0e812","Type":"ContainerDied","Data":"d4591d94125b553e751ae700bfddc452db8eeb5ba380aa0243a23e396b703011"}
Jan 30 12:26:02 crc kubenswrapper[4703]: I0130 12:26:02.968716 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7" event={"ID":"50c4f4f0-c935-406c-86a7-791a8dd0e812","Type":"ContainerStarted","Data":"49cc6dd7d18203f77ea0642e74f89fde5f29fce813490368ba5c360777b40636"}
Jan 30 12:26:03 crc kubenswrapper[4703]: I0130 12:26:03.090157 4703 scope.go:117] "RemoveContainer" containerID="b0524fb3e1551d249a0fbd6dc660597b23942b6881789d881cfbc7b65d3d1252"
Jan 30 12:26:03 crc kubenswrapper[4703]: E0130 12:26:03.090434 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:26:03 crc kubenswrapper[4703]: I0130 12:26:03.105845 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbda6f00-1625-4d38-9932-de68c1af9bfe" path="/var/lib/kubelet/pods/dbda6f00-1625-4d38-9932-de68c1af9bfe/volumes"
Jan 30 12:26:03 crc kubenswrapper[4703]: I0130 12:26:03.985984 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7" event={"ID":"50c4f4f0-c935-406c-86a7-791a8dd0e812","Type":"ContainerStarted","Data":"7b01696861c402826d5289ddde07808a497f9972f78829c000334a064f6769f3"}
Jan 30 12:26:03 crc kubenswrapper[4703]: I0130 12:26:03.986421 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7"
Jan 30 12:26:04 crc kubenswrapper[4703]: I0130 12:26:04.024303 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7" podStartSLOduration=3.024280725 podStartE2EDuration="3.024280725s" podCreationTimestamp="2026-01-30 12:26:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:26:04.010037001 +0000 UTC m=+1799.787858645" watchObservedRunningTime="2026-01-30 12:26:04.024280725 +0000 UTC m=+1799.802102379"
Jan 30 12:26:07 crc kubenswrapper[4703]: I0130 12:26:07.086948 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127"
Jan 30 12:26:07 crc kubenswrapper[4703]: E0130 12:26:07.087909 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:26:11 crc kubenswrapper[4703]: I0130 12:26:11.743987 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6cd9bffc9-p5rn7"
Jan 30 12:26:11 crc kubenswrapper[4703]: I0130 12:26:11.830671 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-t46r9"]
Jan 30 12:26:11 crc kubenswrapper[4703]: I0130 12:26:11.831043 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" podUID="218366cb-8368-428f-abdf-4576b594b953" containerName="dnsmasq-dns" containerID="cri-o://7eacabd31666238c98cabe10f1751a29cae787b6c6f855f18ba5a2e56efc8625" gracePeriod=10
Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.222095 4703 generic.go:334] "Generic (PLEG): container finished" podID="218366cb-8368-428f-abdf-4576b594b953" containerID="7eacabd31666238c98cabe10f1751a29cae787b6c6f855f18ba5a2e56efc8625" exitCode=0
Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.222748 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" event={"ID":"218366cb-8368-428f-abdf-4576b594b953","Type":"ContainerDied","Data":"7eacabd31666238c98cabe10f1751a29cae787b6c6f855f18ba5a2e56efc8625"}
Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.445857 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9"
Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.535019 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-dns-swift-storage-0\") pod \"218366cb-8368-428f-abdf-4576b594b953\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.535105 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-config\") pod \"218366cb-8368-428f-abdf-4576b594b953\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.535213 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-openstack-edpm-ipam\") pod \"218366cb-8368-428f-abdf-4576b594b953\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.535310 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-ovsdbserver-sb\") pod \"218366cb-8368-428f-abdf-4576b594b953\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.535340 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-dns-svc\") pod \"218366cb-8368-428f-abdf-4576b594b953\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.535448 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-ovsdbserver-nb\") pod \"218366cb-8368-428f-abdf-4576b594b953\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.535590 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlrwl\" (UniqueName: \"kubernetes.io/projected/218366cb-8368-428f-abdf-4576b594b953-kube-api-access-jlrwl\") pod \"218366cb-8368-428f-abdf-4576b594b953\" (UID: \"218366cb-8368-428f-abdf-4576b594b953\") " Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.543863 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/218366cb-8368-428f-abdf-4576b594b953-kube-api-access-jlrwl" (OuterVolumeSpecName: "kube-api-access-jlrwl") pod "218366cb-8368-428f-abdf-4576b594b953" (UID: "218366cb-8368-428f-abdf-4576b594b953"). InnerVolumeSpecName "kube-api-access-jlrwl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.604566 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "218366cb-8368-428f-abdf-4576b594b953" (UID: "218366cb-8368-428f-abdf-4576b594b953"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.607646 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "218366cb-8368-428f-abdf-4576b594b953" (UID: "218366cb-8368-428f-abdf-4576b594b953"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.610890 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "218366cb-8368-428f-abdf-4576b594b953" (UID: "218366cb-8368-428f-abdf-4576b594b953"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.618434 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "218366cb-8368-428f-abdf-4576b594b953" (UID: "218366cb-8368-428f-abdf-4576b594b953"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.639088 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.639154 4703 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.639164 4703 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.639174 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlrwl\" (UniqueName: \"kubernetes.io/projected/218366cb-8368-428f-abdf-4576b594b953-kube-api-access-jlrwl\") on node \"crc\" DevicePath \"\"" Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.639185 4703 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.643738 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "218366cb-8368-428f-abdf-4576b594b953" (UID: "218366cb-8368-428f-abdf-4576b594b953"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.645946 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-config" (OuterVolumeSpecName: "config") pod "218366cb-8368-428f-abdf-4576b594b953" (UID: "218366cb-8368-428f-abdf-4576b594b953"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.741513 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:26:12 crc kubenswrapper[4703]: I0130 12:26:12.741566 4703 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/218366cb-8368-428f-abdf-4576b594b953-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 30 12:26:13 crc kubenswrapper[4703]: I0130 12:26:13.237576 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" event={"ID":"218366cb-8368-428f-abdf-4576b594b953","Type":"ContainerDied","Data":"c9860dea1d0d066d28de8165881308c006bd5321265dc35e7dbe272ba0f9ba2d"} Jan 30 12:26:13 crc kubenswrapper[4703]: I0130 12:26:13.237921 4703 scope.go:117] "RemoveContainer" containerID="7eacabd31666238c98cabe10f1751a29cae787b6c6f855f18ba5a2e56efc8625" Jan 30 12:26:13 crc kubenswrapper[4703]: I0130 12:26:13.237646 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-79bd4cc8c9-t46r9" Jan 30 12:26:13 crc kubenswrapper[4703]: I0130 12:26:13.267464 4703 scope.go:117] "RemoveContainer" containerID="ffafe600130d769d39c0a040ad52b5c837daf338203e308982ae92ae8410c8ad" Jan 30 12:26:13 crc kubenswrapper[4703]: I0130 12:26:13.271471 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-t46r9"] Jan 30 12:26:13 crc kubenswrapper[4703]: I0130 12:26:13.283058 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-79bd4cc8c9-t46r9"] Jan 30 12:26:15 crc kubenswrapper[4703]: I0130 12:26:15.098795 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="218366cb-8368-428f-abdf-4576b594b953" path="/var/lib/kubelet/pods/218366cb-8368-428f-abdf-4576b594b953/volumes" Jan 30 12:26:17 crc kubenswrapper[4703]: I0130 12:26:17.087759 4703 scope.go:117] "RemoveContainer" containerID="b0524fb3e1551d249a0fbd6dc660597b23942b6881789d881cfbc7b65d3d1252" Jan 30 12:26:18 crc kubenswrapper[4703]: I0130 12:26:18.087962 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:26:18 crc kubenswrapper[4703]: E0130 12:26:18.088809 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:26:18 crc kubenswrapper[4703]: I0130 12:26:18.299914 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerStarted","Data":"56f50513b15f72d3dc117f3532e25ad9cf4d47a16a83a4e4592202c941ccbe86"} Jan 30 12:26:21 crc kubenswrapper[4703]: I0130 12:26:21.035930 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 30 12:26:21 crc kubenswrapper[4703]: I0130 12:26:21.332643 4703 generic.go:334] "Generic (PLEG): container finished" podID="2fc19a6b-3cde-4bb5-9499-f5be846289da" 
containerID="56f50513b15f72d3dc117f3532e25ad9cf4d47a16a83a4e4592202c941ccbe86" exitCode=1 Jan 30 12:26:21 crc kubenswrapper[4703]: I0130 12:26:21.332738 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerDied","Data":"56f50513b15f72d3dc117f3532e25ad9cf4d47a16a83a4e4592202c941ccbe86"} Jan 30 12:26:21 crc kubenswrapper[4703]: I0130 12:26:21.332837 4703 scope.go:117] "RemoveContainer" containerID="b0524fb3e1551d249a0fbd6dc660597b23942b6881789d881cfbc7b65d3d1252" Jan 30 12:26:21 crc kubenswrapper[4703]: I0130 12:26:21.333897 4703 scope.go:117] "RemoveContainer" containerID="56f50513b15f72d3dc117f3532e25ad9cf4d47a16a83a4e4592202c941ccbe86" Jan 30 12:26:21 crc kubenswrapper[4703]: E0130 12:26:21.334419 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.135384 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp"] Jan 30 12:26:24 crc kubenswrapper[4703]: E0130 12:26:24.137003 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="218366cb-8368-428f-abdf-4576b594b953" containerName="dnsmasq-dns" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.137032 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="218366cb-8368-428f-abdf-4576b594b953" containerName="dnsmasq-dns" Jan 30 12:26:24 crc kubenswrapper[4703]: E0130 12:26:24.137068 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbda6f00-1625-4d38-9932-de68c1af9bfe" containerName="dnsmasq-dns" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.137078 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbda6f00-1625-4d38-9932-de68c1af9bfe" containerName="dnsmasq-dns" Jan 30 12:26:24 crc kubenswrapper[4703]: E0130 12:26:24.137089 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="218366cb-8368-428f-abdf-4576b594b953" containerName="init" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.137098 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="218366cb-8368-428f-abdf-4576b594b953" containerName="init" Jan 30 12:26:24 crc kubenswrapper[4703]: E0130 12:26:24.137144 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbda6f00-1625-4d38-9932-de68c1af9bfe" containerName="init" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.137155 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbda6f00-1625-4d38-9932-de68c1af9bfe" containerName="init" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.137503 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="218366cb-8368-428f-abdf-4576b594b953" containerName="dnsmasq-dns" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.137530 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbda6f00-1625-4d38-9932-de68c1af9bfe" containerName="dnsmasq-dns" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.138714 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.142912 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.143467 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.143537 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.144207 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-jjdl5" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.184914 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp\" (UID: \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.185017 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp\" (UID: \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.185053 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rgd2q\" (UniqueName: \"kubernetes.io/projected/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-kube-api-access-rgd2q\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp\" (UID: \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.185203 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp\" (UID: \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.186507 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp"] Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.288243 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp\" (UID: \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.288579 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp\" (UID: \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.288644 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp\" (UID: \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.288693 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rgd2q\" (UniqueName: \"kubernetes.io/projected/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-kube-api-access-rgd2q\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp\" (UID: \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.297155 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp\" (UID: \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.297415 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp\" (UID: \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.297769 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp\" (UID: \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.312032 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rgd2q\" (UniqueName: \"kubernetes.io/projected/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-kube-api-access-rgd2q\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp\" (UID: \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.371903 4703 generic.go:334] "Generic (PLEG): container finished" podID="532d7bbf-dd48-4d8f-ae5e-27a35bb889e1" containerID="ddeeee9c3deb786d72f0b77c914f8bf2d74c18e650bd2dc8606a30543d7ac2a9" exitCode=0 Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.371985 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1","Type":"ContainerDied","Data":"ddeeee9c3deb786d72f0b77c914f8bf2d74c18e650bd2dc8606a30543d7ac2a9"} Jan 30 12:26:24 crc 
kubenswrapper[4703]: I0130 12:26:24.376772 4703 generic.go:334] "Generic (PLEG): container finished" podID="2ec18b52-27e8-4a28-819d-dcb39325cbf7" containerID="1f7e2609ee4b95a9f52f3694354edcee584c82f9ce5a2c8c1a1342a754efc0e1" exitCode=0 Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.376841 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"2ec18b52-27e8-4a28-819d-dcb39325cbf7","Type":"ContainerDied","Data":"1f7e2609ee4b95a9f52f3694354edcee584c82f9ce5a2c8c1a1342a754efc0e1"} Jan 30 12:26:24 crc kubenswrapper[4703]: I0130 12:26:24.600378 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" Jan 30 12:26:25 crc kubenswrapper[4703]: I0130 12:26:25.392886 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"532d7bbf-dd48-4d8f-ae5e-27a35bb889e1","Type":"ContainerStarted","Data":"722dbc07d3d50ab8232d03bbcd9cc8d1b1c1c37005237df0f7a315d927e5d8bc"} Jan 30 12:26:25 crc kubenswrapper[4703]: I0130 12:26:25.394090 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:26:25 crc kubenswrapper[4703]: I0130 12:26:25.396407 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"2ec18b52-27e8-4a28-819d-dcb39325cbf7","Type":"ContainerStarted","Data":"6bfbada04d3aea2193559375def78add943941d2a385b9bb5c9b667868638bfc"} Jan 30 12:26:25 crc kubenswrapper[4703]: I0130 12:26:25.396806 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 30 12:26:25 crc kubenswrapper[4703]: I0130 12:26:25.435391 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.435350661 podStartE2EDuration="37.435350661s" podCreationTimestamp="2026-01-30 12:25:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:26:25.415482576 +0000 UTC m=+1821.193304230" watchObservedRunningTime="2026-01-30 12:26:25.435350661 +0000 UTC m=+1821.213172315" Jan 30 12:26:25 crc kubenswrapper[4703]: I0130 12:26:25.454331 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.454305314 podStartE2EDuration="38.454305314s" podCreationTimestamp="2026-01-30 12:25:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:26:25.441809226 +0000 UTC m=+1821.219630880" watchObservedRunningTime="2026-01-30 12:26:25.454305314 +0000 UTC m=+1821.232126968" Jan 30 12:26:25 crc kubenswrapper[4703]: I0130 12:26:25.493067 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp"] Jan 30 12:26:25 crc kubenswrapper[4703]: I0130 12:26:25.504494 4703 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 30 12:26:26 crc kubenswrapper[4703]: I0130 12:26:26.034502 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:26:26 crc kubenswrapper[4703]: I0130 12:26:26.035405 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:26:26 crc 
kubenswrapper[4703]: I0130 12:26:26.035505 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:26:26 crc kubenswrapper[4703]: I0130 12:26:26.036280 4703 scope.go:117] "RemoveContainer" containerID="56f50513b15f72d3dc117f3532e25ad9cf4d47a16a83a4e4592202c941ccbe86" Jan 30 12:26:26 crc kubenswrapper[4703]: E0130 12:26:26.036634 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:26:26 crc kubenswrapper[4703]: I0130 12:26:26.412072 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" event={"ID":"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd","Type":"ContainerStarted","Data":"482cd51868fae94737609857ff3bf5887a0b7d50b86592c78e56f32fa8ebd234"} Jan 30 12:26:26 crc kubenswrapper[4703]: I0130 12:26:26.412963 4703 scope.go:117] "RemoveContainer" containerID="56f50513b15f72d3dc117f3532e25ad9cf4d47a16a83a4e4592202c941ccbe86" Jan 30 12:26:26 crc kubenswrapper[4703]: E0130 12:26:26.413522 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:26:30 crc kubenswrapper[4703]: I0130 12:26:30.087354 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:26:30 crc kubenswrapper[4703]: E0130 12:26:30.088886 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:26:38 crc kubenswrapper[4703]: I0130 12:26:38.129772 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="2ec18b52-27e8-4a28-819d-dcb39325cbf7" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.228:5671: connect: connection refused" Jan 30 12:26:38 crc kubenswrapper[4703]: I0130 12:26:38.824913 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" event={"ID":"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd","Type":"ContainerStarted","Data":"8d7e6d453bb585cc5d7e6de51c2eaadc2f5b9ab66f67731ba53ce041a6bad433"} Jan 30 12:26:38 crc kubenswrapper[4703]: I0130 12:26:38.857106 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" podStartSLOduration=1.896244849 podStartE2EDuration="14.857087588s" podCreationTimestamp="2026-01-30 12:26:24 +0000 UTC" firstStartedPulling="2026-01-30 12:26:25.504179084 +0000 UTC m=+1821.282000738" lastFinishedPulling="2026-01-30 12:26:38.465021823 +0000 UTC m=+1834.242843477" observedRunningTime="2026-01-30 
12:26:38.851581398 +0000 UTC m=+1834.629403052" watchObservedRunningTime="2026-01-30 12:26:38.857087588 +0000 UTC m=+1834.634909242" Jan 30 12:26:39 crc kubenswrapper[4703]: I0130 12:26:39.320386 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 30 12:26:41 crc kubenswrapper[4703]: I0130 12:26:41.086637 4703 scope.go:117] "RemoveContainer" containerID="56f50513b15f72d3dc117f3532e25ad9cf4d47a16a83a4e4592202c941ccbe86" Jan 30 12:26:41 crc kubenswrapper[4703]: E0130 12:26:41.087822 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:26:44 crc kubenswrapper[4703]: I0130 12:26:44.086579 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:26:44 crc kubenswrapper[4703]: E0130 12:26:44.087375 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:26:48 crc kubenswrapper[4703]: I0130 12:26:48.128793 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 30 12:26:51 crc kubenswrapper[4703]: I0130 12:26:51.981280 4703 generic.go:334] "Generic (PLEG): container finished" podID="e0551c8c-34a1-4ca3-ab12-6aff9684e1bd" containerID="8d7e6d453bb585cc5d7e6de51c2eaadc2f5b9ab66f67731ba53ce041a6bad433" exitCode=0 Jan 30 12:26:51 crc kubenswrapper[4703]: I0130 12:26:51.981358 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" event={"ID":"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd","Type":"ContainerDied","Data":"8d7e6d453bb585cc5d7e6de51c2eaadc2f5b9ab66f67731ba53ce041a6bad433"} Jan 30 12:26:53 crc kubenswrapper[4703]: I0130 12:26:53.520154 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" Jan 30 12:26:53 crc kubenswrapper[4703]: I0130 12:26:53.701047 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rgd2q\" (UniqueName: \"kubernetes.io/projected/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-kube-api-access-rgd2q\") pod \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\" (UID: \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\") " Jan 30 12:26:53 crc kubenswrapper[4703]: I0130 12:26:53.701930 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-repo-setup-combined-ca-bundle\") pod \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\" (UID: \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\") " Jan 30 12:26:53 crc kubenswrapper[4703]: I0130 12:26:53.702165 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-ssh-key-openstack-edpm-ipam\") pod \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\" (UID: \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\") " Jan 30 12:26:53 crc kubenswrapper[4703]: I0130 12:26:53.702351 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-inventory\") pod \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\" (UID: \"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd\") " Jan 30 12:26:53 crc kubenswrapper[4703]: I0130 12:26:53.711099 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "e0551c8c-34a1-4ca3-ab12-6aff9684e1bd" (UID: "e0551c8c-34a1-4ca3-ab12-6aff9684e1bd"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:26:53 crc kubenswrapper[4703]: I0130 12:26:53.711848 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-kube-api-access-rgd2q" (OuterVolumeSpecName: "kube-api-access-rgd2q") pod "e0551c8c-34a1-4ca3-ab12-6aff9684e1bd" (UID: "e0551c8c-34a1-4ca3-ab12-6aff9684e1bd"). InnerVolumeSpecName "kube-api-access-rgd2q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:26:53 crc kubenswrapper[4703]: I0130 12:26:53.739964 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-inventory" (OuterVolumeSpecName: "inventory") pod "e0551c8c-34a1-4ca3-ab12-6aff9684e1bd" (UID: "e0551c8c-34a1-4ca3-ab12-6aff9684e1bd"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:26:53 crc kubenswrapper[4703]: I0130 12:26:53.741780 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "e0551c8c-34a1-4ca3-ab12-6aff9684e1bd" (UID: "e0551c8c-34a1-4ca3-ab12-6aff9684e1bd"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:26:53 crc kubenswrapper[4703]: I0130 12:26:53.805636 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rgd2q\" (UniqueName: \"kubernetes.io/projected/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-kube-api-access-rgd2q\") on node \"crc\" DevicePath \"\"" Jan 30 12:26:53 crc kubenswrapper[4703]: I0130 12:26:53.805705 4703 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:26:53 crc kubenswrapper[4703]: I0130 12:26:53.805719 4703 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 30 12:26:53 crc kubenswrapper[4703]: I0130 12:26:53.805733 4703 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0551c8c-34a1-4ca3-ab12-6aff9684e1bd-inventory\") on node \"crc\" DevicePath \"\"" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.009460 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" event={"ID":"e0551c8c-34a1-4ca3-ab12-6aff9684e1bd","Type":"ContainerDied","Data":"482cd51868fae94737609857ff3bf5887a0b7d50b86592c78e56f32fa8ebd234"} Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.009523 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="482cd51868fae94737609857ff3bf5887a0b7d50b86592c78e56f32fa8ebd234" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.009570 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.106795 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8"] Jan 30 12:26:54 crc kubenswrapper[4703]: E0130 12:26:54.107437 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0551c8c-34a1-4ca3-ab12-6aff9684e1bd" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.107471 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0551c8c-34a1-4ca3-ab12-6aff9684e1bd" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.107782 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0551c8c-34a1-4ca3-ab12-6aff9684e1bd" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.108815 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.114001 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.115202 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.115512 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.117095 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-jjdl5" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.169927 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8"] Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.212822 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2fb337f3-59f5-45cf-9e47-dde8e6dac066-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wlvc8\" (UID: \"2fb337f3-59f5-45cf-9e47-dde8e6dac066\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.213695 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2fb337f3-59f5-45cf-9e47-dde8e6dac066-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wlvc8\" (UID: \"2fb337f3-59f5-45cf-9e47-dde8e6dac066\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.214174 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-525q7\" (UniqueName: \"kubernetes.io/projected/2fb337f3-59f5-45cf-9e47-dde8e6dac066-kube-api-access-525q7\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wlvc8\" (UID: \"2fb337f3-59f5-45cf-9e47-dde8e6dac066\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.316334 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2fb337f3-59f5-45cf-9e47-dde8e6dac066-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wlvc8\" (UID: \"2fb337f3-59f5-45cf-9e47-dde8e6dac066\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.316467 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2fb337f3-59f5-45cf-9e47-dde8e6dac066-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wlvc8\" (UID: \"2fb337f3-59f5-45cf-9e47-dde8e6dac066\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.316545 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-525q7\" (UniqueName: \"kubernetes.io/projected/2fb337f3-59f5-45cf-9e47-dde8e6dac066-kube-api-access-525q7\") pod 
\"redhat-edpm-deployment-openstack-edpm-ipam-wlvc8\" (UID: \"2fb337f3-59f5-45cf-9e47-dde8e6dac066\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.322130 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2fb337f3-59f5-45cf-9e47-dde8e6dac066-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wlvc8\" (UID: \"2fb337f3-59f5-45cf-9e47-dde8e6dac066\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.323504 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2fb337f3-59f5-45cf-9e47-dde8e6dac066-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wlvc8\" (UID: \"2fb337f3-59f5-45cf-9e47-dde8e6dac066\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.337349 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-525q7\" (UniqueName: \"kubernetes.io/projected/2fb337f3-59f5-45cf-9e47-dde8e6dac066-kube-api-access-525q7\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-wlvc8\" (UID: \"2fb337f3-59f5-45cf-9e47-dde8e6dac066\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8" Jan 30 12:26:54 crc kubenswrapper[4703]: I0130 12:26:54.428751 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8" Jan 30 12:26:55 crc kubenswrapper[4703]: I0130 12:26:55.009556 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8"] Jan 30 12:26:55 crc kubenswrapper[4703]: W0130 12:26:55.013538 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fb337f3_59f5_45cf_9e47_dde8e6dac066.slice/crio-4dda82083fc4dd9c9f9281f1110ad648b880fc9c4054d2e89c245c0b6e1f49b0 WatchSource:0}: Error finding container 4dda82083fc4dd9c9f9281f1110ad648b880fc9c4054d2e89c245c0b6e1f49b0: Status 404 returned error can't find the container with id 4dda82083fc4dd9c9f9281f1110ad648b880fc9c4054d2e89c245c0b6e1f49b0 Jan 30 12:26:56 crc kubenswrapper[4703]: I0130 12:26:56.038774 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8" event={"ID":"2fb337f3-59f5-45cf-9e47-dde8e6dac066","Type":"ContainerStarted","Data":"4dda82083fc4dd9c9f9281f1110ad648b880fc9c4054d2e89c245c0b6e1f49b0"} Jan 30 12:26:56 crc kubenswrapper[4703]: I0130 12:26:56.087250 4703 scope.go:117] "RemoveContainer" containerID="56f50513b15f72d3dc117f3532e25ad9cf4d47a16a83a4e4592202c941ccbe86" Jan 30 12:26:56 crc kubenswrapper[4703]: E0130 12:26:56.087582 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:26:57 crc kubenswrapper[4703]: I0130 12:26:57.159874 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8" 
event={"ID":"2fb337f3-59f5-45cf-9e47-dde8e6dac066","Type":"ContainerStarted","Data":"811608aca58e0e105a2c8ae2d150b1998d8036753e4369e15f255150651ebb2f"} Jan 30 12:26:57 crc kubenswrapper[4703]: I0130 12:26:57.186596 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8" podStartSLOduration=2.742289093 podStartE2EDuration="3.186565986s" podCreationTimestamp="2026-01-30 12:26:54 +0000 UTC" firstStartedPulling="2026-01-30 12:26:55.331614939 +0000 UTC m=+1851.109436583" lastFinishedPulling="2026-01-30 12:26:55.775891822 +0000 UTC m=+1851.553713476" observedRunningTime="2026-01-30 12:26:57.181180178 +0000 UTC m=+1852.959001832" watchObservedRunningTime="2026-01-30 12:26:57.186565986 +0000 UTC m=+1852.964387640" Jan 30 12:26:58 crc kubenswrapper[4703]: I0130 12:26:58.086048 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:26:58 crc kubenswrapper[4703]: E0130 12:26:58.086796 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:26:59 crc kubenswrapper[4703]: I0130 12:26:59.190867 4703 generic.go:334] "Generic (PLEG): container finished" podID="2fb337f3-59f5-45cf-9e47-dde8e6dac066" containerID="811608aca58e0e105a2c8ae2d150b1998d8036753e4369e15f255150651ebb2f" exitCode=0 Jan 30 12:26:59 crc kubenswrapper[4703]: I0130 12:26:59.190962 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8" event={"ID":"2fb337f3-59f5-45cf-9e47-dde8e6dac066","Type":"ContainerDied","Data":"811608aca58e0e105a2c8ae2d150b1998d8036753e4369e15f255150651ebb2f"} Jan 30 12:27:00 crc kubenswrapper[4703]: I0130 12:27:00.960821 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.132512 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-525q7\" (UniqueName: \"kubernetes.io/projected/2fb337f3-59f5-45cf-9e47-dde8e6dac066-kube-api-access-525q7\") pod \"2fb337f3-59f5-45cf-9e47-dde8e6dac066\" (UID: \"2fb337f3-59f5-45cf-9e47-dde8e6dac066\") " Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.132937 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2fb337f3-59f5-45cf-9e47-dde8e6dac066-inventory\") pod \"2fb337f3-59f5-45cf-9e47-dde8e6dac066\" (UID: \"2fb337f3-59f5-45cf-9e47-dde8e6dac066\") " Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.132987 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2fb337f3-59f5-45cf-9e47-dde8e6dac066-ssh-key-openstack-edpm-ipam\") pod \"2fb337f3-59f5-45cf-9e47-dde8e6dac066\" (UID: \"2fb337f3-59f5-45cf-9e47-dde8e6dac066\") " Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.152312 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fb337f3-59f5-45cf-9e47-dde8e6dac066-kube-api-access-525q7" (OuterVolumeSpecName: "kube-api-access-525q7") pod "2fb337f3-59f5-45cf-9e47-dde8e6dac066" (UID: "2fb337f3-59f5-45cf-9e47-dde8e6dac066"). InnerVolumeSpecName "kube-api-access-525q7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.165421 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fb337f3-59f5-45cf-9e47-dde8e6dac066-inventory" (OuterVolumeSpecName: "inventory") pod "2fb337f3-59f5-45cf-9e47-dde8e6dac066" (UID: "2fb337f3-59f5-45cf-9e47-dde8e6dac066"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.168029 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fb337f3-59f5-45cf-9e47-dde8e6dac066-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "2fb337f3-59f5-45cf-9e47-dde8e6dac066" (UID: "2fb337f3-59f5-45cf-9e47-dde8e6dac066"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.216108 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8" event={"ID":"2fb337f3-59f5-45cf-9e47-dde8e6dac066","Type":"ContainerDied","Data":"4dda82083fc4dd9c9f9281f1110ad648b880fc9c4054d2e89c245c0b6e1f49b0"} Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.216174 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4dda82083fc4dd9c9f9281f1110ad648b880fc9c4054d2e89c245c0b6e1f49b0" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.216241 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-wlvc8" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.237828 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-525q7\" (UniqueName: \"kubernetes.io/projected/2fb337f3-59f5-45cf-9e47-dde8e6dac066-kube-api-access-525q7\") on node \"crc\" DevicePath \"\"" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.238164 4703 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2fb337f3-59f5-45cf-9e47-dde8e6dac066-inventory\") on node \"crc\" DevicePath \"\"" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.238183 4703 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2fb337f3-59f5-45cf-9e47-dde8e6dac066-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.394147 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t"] Jan 30 12:27:01 crc kubenswrapper[4703]: E0130 12:27:01.394879 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fb337f3-59f5-45cf-9e47-dde8e6dac066" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.394914 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fb337f3-59f5-45cf-9e47-dde8e6dac066" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.395197 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fb337f3-59f5-45cf-9e47-dde8e6dac066" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.400433 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.409022 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t"] Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.409692 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.410091 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-jjdl5" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.410961 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.411245 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.443204 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnh4v\" (UniqueName: \"kubernetes.io/projected/d7bb23ae-c393-4b73-a856-b61d160d513d-kube-api-access-xnh4v\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t\" (UID: \"d7bb23ae-c393-4b73-a856-b61d160d513d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.443646 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7bb23ae-c393-4b73-a856-b61d160d513d-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t\" (UID: \"d7bb23ae-c393-4b73-a856-b61d160d513d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.443901 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d7bb23ae-c393-4b73-a856-b61d160d513d-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t\" (UID: \"d7bb23ae-c393-4b73-a856-b61d160d513d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.444003 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d7bb23ae-c393-4b73-a856-b61d160d513d-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t\" (UID: \"d7bb23ae-c393-4b73-a856-b61d160d513d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.546929 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnh4v\" (UniqueName: \"kubernetes.io/projected/d7bb23ae-c393-4b73-a856-b61d160d513d-kube-api-access-xnh4v\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t\" (UID: \"d7bb23ae-c393-4b73-a856-b61d160d513d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.547678 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d7bb23ae-c393-4b73-a856-b61d160d513d-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t\" (UID: \"d7bb23ae-c393-4b73-a856-b61d160d513d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.547766 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d7bb23ae-c393-4b73-a856-b61d160d513d-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t\" (UID: \"d7bb23ae-c393-4b73-a856-b61d160d513d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.547798 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d7bb23ae-c393-4b73-a856-b61d160d513d-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t\" (UID: \"d7bb23ae-c393-4b73-a856-b61d160d513d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.555048 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d7bb23ae-c393-4b73-a856-b61d160d513d-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t\" (UID: \"d7bb23ae-c393-4b73-a856-b61d160d513d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.555114 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d7bb23ae-c393-4b73-a856-b61d160d513d-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t\" (UID: \"d7bb23ae-c393-4b73-a856-b61d160d513d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.569893 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7bb23ae-c393-4b73-a856-b61d160d513d-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t\" (UID: \"d7bb23ae-c393-4b73-a856-b61d160d513d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.573237 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnh4v\" (UniqueName: \"kubernetes.io/projected/d7bb23ae-c393-4b73-a856-b61d160d513d-kube-api-access-xnh4v\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t\" (UID: \"d7bb23ae-c393-4b73-a856-b61d160d513d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" Jan 30 12:27:01 crc kubenswrapper[4703]: I0130 12:27:01.723736 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" Jan 30 12:27:02 crc kubenswrapper[4703]: I0130 12:27:02.315320 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t"] Jan 30 12:27:03 crc kubenswrapper[4703]: I0130 12:27:03.242009 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" event={"ID":"d7bb23ae-c393-4b73-a856-b61d160d513d","Type":"ContainerStarted","Data":"9585fc35f92406504ff487d3c51fe659281d60ce77c9e2b8ce234ff45f244224"} Jan 30 12:27:03 crc kubenswrapper[4703]: I0130 12:27:03.242092 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" event={"ID":"d7bb23ae-c393-4b73-a856-b61d160d513d","Type":"ContainerStarted","Data":"33e89bf64304a9cbb29e1747f42a6fa0e0e20edb5b5dff1921fa80a0bbafafa7"} Jan 30 12:27:03 crc kubenswrapper[4703]: I0130 12:27:03.275050 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" podStartSLOduration=1.833508852 podStartE2EDuration="2.275015335s" podCreationTimestamp="2026-01-30 12:27:01 +0000 UTC" firstStartedPulling="2026-01-30 12:27:02.327446504 +0000 UTC m=+1858.105268158" lastFinishedPulling="2026-01-30 12:27:02.768952987 +0000 UTC m=+1858.546774641" observedRunningTime="2026-01-30 12:27:03.261318916 +0000 UTC m=+1859.039140580" watchObservedRunningTime="2026-01-30 12:27:03.275015335 +0000 UTC m=+1859.052836989" Jan 30 12:27:07 crc kubenswrapper[4703]: I0130 12:27:07.087427 4703 scope.go:117] "RemoveContainer" containerID="56f50513b15f72d3dc117f3532e25ad9cf4d47a16a83a4e4592202c941ccbe86" Jan 30 12:27:07 crc kubenswrapper[4703]: E0130 12:27:07.088628 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:27:10 crc kubenswrapper[4703]: I0130 12:27:10.088208 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:27:10 crc kubenswrapper[4703]: E0130 12:27:10.089775 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:27:22 crc kubenswrapper[4703]: I0130 12:27:22.086614 4703 scope.go:117] "RemoveContainer" containerID="56f50513b15f72d3dc117f3532e25ad9cf4d47a16a83a4e4592202c941ccbe86" Jan 30 12:27:22 crc kubenswrapper[4703]: E0130 12:27:22.087748 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:27:23 crc kubenswrapper[4703]: I0130 
12:27:23.086864 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:27:23 crc kubenswrapper[4703]: E0130 12:27:23.087877 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:27:33 crc kubenswrapper[4703]: I0130 12:27:33.257690 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-6e21-account-create-update-6fr54"] Jan 30 12:27:33 crc kubenswrapper[4703]: I0130 12:27:33.273705 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-6e21-account-create-update-6fr54"] Jan 30 12:27:33 crc kubenswrapper[4703]: I0130 12:27:33.287567 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-25gg4"] Jan 30 12:27:33 crc kubenswrapper[4703]: I0130 12:27:33.300359 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-25gg4"] Jan 30 12:27:35 crc kubenswrapper[4703]: I0130 12:27:35.101602 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05cf9256-9286-4808-9d1c-9e73e8fa2860" path="/var/lib/kubelet/pods/05cf9256-9286-4808-9d1c-9e73e8fa2860/volumes" Jan 30 12:27:35 crc kubenswrapper[4703]: I0130 12:27:35.103015 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0763f436-e964-45c5-92f3-bb4cebeb01a9" path="/var/lib/kubelet/pods/0763f436-e964-45c5-92f3-bb4cebeb01a9/volumes" Jan 30 12:27:37 crc kubenswrapper[4703]: I0130 12:27:37.086980 4703 scope.go:117] "RemoveContainer" containerID="56f50513b15f72d3dc117f3532e25ad9cf4d47a16a83a4e4592202c941ccbe86" Jan 30 12:27:37 crc kubenswrapper[4703]: E0130 12:27:37.087907 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:27:38 crc kubenswrapper[4703]: I0130 12:27:38.087611 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:27:38 crc kubenswrapper[4703]: E0130 12:27:38.087868 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:27:41 crc kubenswrapper[4703]: I0130 12:27:41.041087 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-create-fdwhn"] Jan 30 12:27:41 crc kubenswrapper[4703]: I0130 12:27:41.053012 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-create-fdwhn"] Jan 30 12:27:41 crc kubenswrapper[4703]: I0130 12:27:41.102622 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="2dfaefc7-3070-4d23-815d-164d669de123" path="/var/lib/kubelet/pods/2dfaefc7-3070-4d23-815d-164d669de123/volumes" Jan 30 12:27:43 crc kubenswrapper[4703]: I0130 12:27:43.035281 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-badc-account-create-update-v5rg5"] Jan 30 12:27:43 crc kubenswrapper[4703]: I0130 12:27:43.046866 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-badc-account-create-update-v5rg5"] Jan 30 12:27:43 crc kubenswrapper[4703]: I0130 12:27:43.100499 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c7d9a80-1086-4991-89b9-d8dffe01eadd" path="/var/lib/kubelet/pods/1c7d9a80-1086-4991-89b9-d8dffe01eadd/volumes" Jan 30 12:27:45 crc kubenswrapper[4703]: I0130 12:27:45.053495 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-l67m7"] Jan 30 12:27:45 crc kubenswrapper[4703]: I0130 12:27:45.070059 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-e48f-account-create-update-mkwjg"] Jan 30 12:27:45 crc kubenswrapper[4703]: I0130 12:27:45.083899 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-kttq7"] Jan 30 12:27:45 crc kubenswrapper[4703]: I0130 12:27:45.101921 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-ee05-account-create-update-dgl6z"] Jan 30 12:27:45 crc kubenswrapper[4703]: I0130 12:27:45.109692 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-e48f-account-create-update-mkwjg"] Jan 30 12:27:45 crc kubenswrapper[4703]: I0130 12:27:45.132002 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-l67m7"] Jan 30 12:27:45 crc kubenswrapper[4703]: I0130 12:27:45.159114 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-kttq7"] Jan 30 12:27:45 crc kubenswrapper[4703]: I0130 12:27:45.175899 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-ee05-account-create-update-dgl6z"] Jan 30 12:27:47 crc kubenswrapper[4703]: I0130 12:27:47.101455 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="107ee781-3758-4b91-81d2-f934501de19e" path="/var/lib/kubelet/pods/107ee781-3758-4b91-81d2-f934501de19e/volumes" Jan 30 12:27:47 crc kubenswrapper[4703]: I0130 12:27:47.103975 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0" path="/var/lib/kubelet/pods/1dcb866e-ecb2-4eec-9cc5-6cf2c46dd9b0/volumes" Jan 30 12:27:47 crc kubenswrapper[4703]: I0130 12:27:47.104926 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47c89c69-b5ba-480b-bd12-1aeaaf6cbc01" path="/var/lib/kubelet/pods/47c89c69-b5ba-480b-bd12-1aeaaf6cbc01/volumes" Jan 30 12:27:47 crc kubenswrapper[4703]: I0130 12:27:47.105834 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f40c42c3-2575-425d-b80e-845f4a0a88b5" path="/var/lib/kubelet/pods/f40c42c3-2575-425d-b80e-845f4a0a88b5/volumes" Jan 30 12:27:48 crc kubenswrapper[4703]: I0130 12:27:48.087027 4703 scope.go:117] "RemoveContainer" containerID="56f50513b15f72d3dc117f3532e25ad9cf4d47a16a83a4e4592202c941ccbe86" Jan 30 12:27:49 crc kubenswrapper[4703]: I0130 12:27:49.067901 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerStarted","Data":"cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8"} Jan 30 12:27:50 crc kubenswrapper[4703]: I0130 12:27:50.913565 4703 scope.go:117] "RemoveContainer" containerID="014dde91f8eff00112c137311207bb1865572820e51fa8bcfe2fbebec10573b4" Jan 30 12:27:50 crc kubenswrapper[4703]: I0130 12:27:50.963575 4703 scope.go:117] "RemoveContainer" containerID="961b8bfc95a8bebc04532d78d8606028a66b423b6f1fff22a433ac0b34fed470" Jan 30 12:27:50 crc kubenswrapper[4703]: I0130 12:27:50.994915 4703 scope.go:117] "RemoveContainer" containerID="4582e0be712fec21723c0463761917ac26a0bebeb1bbba9c315c0c879e55974e" Jan 30 12:27:51 crc kubenswrapper[4703]: I0130 12:27:51.034640 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 30 12:27:51 crc kubenswrapper[4703]: I0130 12:27:51.062516 4703 scope.go:117] "RemoveContainer" containerID="08c418b36146fb180e1ae5350db6f861f21cc15a2e03dc2d5e47a615ba82bf8f" Jan 30 12:27:51 crc kubenswrapper[4703]: I0130 12:27:51.087774 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:27:51 crc kubenswrapper[4703]: E0130 12:27:51.088094 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:27:51 crc kubenswrapper[4703]: I0130 12:27:51.105148 4703 scope.go:117] "RemoveContainer" containerID="47ffa0e122efb607947d6a1571555ddc9c2f8decb42759a24114ff54bb6e6078" Jan 30 12:27:51 crc kubenswrapper[4703]: I0130 12:27:51.144603 4703 scope.go:117] "RemoveContainer" containerID="1a4429f6b7be2777b7116dee67e75ced428a4c3dc7eb4c8da82b6d62615129f6" Jan 30 12:27:51 crc kubenswrapper[4703]: I0130 12:27:51.182849 4703 scope.go:117] "RemoveContainer" containerID="532482240e6cb7e93b40611bb49e68629fe8d076d618c4d9cdec227adc981e94" Jan 30 12:27:51 crc kubenswrapper[4703]: I0130 12:27:51.225950 4703 scope.go:117] "RemoveContainer" containerID="a49e31f3846e4eb083445f41149b614c0c2c3ca4b9985a8acc620ae47fe47d31" Jan 30 12:27:51 crc kubenswrapper[4703]: I0130 12:27:51.251698 4703 scope.go:117] "RemoveContainer" containerID="80458f4316e79ae733c570a0ce5f80d7a0287a90166a42b27ab0859c125c2319" Jan 30 12:27:51 crc kubenswrapper[4703]: I0130 12:27:51.285752 4703 scope.go:117] "RemoveContainer" containerID="3d48100bb4aa8798349c226475ed45d9377a003afcdeea8e8ea83148f015720a" Jan 30 12:27:51 crc kubenswrapper[4703]: I0130 12:27:51.334323 4703 scope.go:117] "RemoveContainer" containerID="87dea74478a4a8b66c31847598edf50382388fac013fe6a4a51054c3e092c2a4" Jan 30 12:27:51 crc kubenswrapper[4703]: I0130 12:27:51.372229 4703 scope.go:117] "RemoveContainer" containerID="8f12f4da993d6766f823f8295f03593cb7d86f53d9ac64ba51601c08925c6e03" Jan 30 12:27:53 crc kubenswrapper[4703]: I0130 12:27:53.122692 4703 generic.go:334] "Generic (PLEG): container finished" podID="2fc19a6b-3cde-4bb5-9499-f5be846289da" containerID="cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8" exitCode=1 Jan 30 12:27:53 crc kubenswrapper[4703]: I0130 12:27:53.122866 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerDied","Data":"cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8"} Jan 30 12:27:53 crc kubenswrapper[4703]: I0130 12:27:53.123830 4703 scope.go:117] "RemoveContainer" containerID="56f50513b15f72d3dc117f3532e25ad9cf4d47a16a83a4e4592202c941ccbe86" Jan 30 12:27:53 crc kubenswrapper[4703]: I0130 12:27:53.125086 4703 scope.go:117] "RemoveContainer" containerID="cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8" Jan 30 12:27:53 crc kubenswrapper[4703]: E0130 12:27:53.125498 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:27:56 crc kubenswrapper[4703]: I0130 12:27:56.035420 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:27:56 crc kubenswrapper[4703]: I0130 12:27:56.037186 4703 scope.go:117] "RemoveContainer" containerID="cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8" Jan 30 12:27:56 crc kubenswrapper[4703]: I0130 12:27:56.037516 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:27:56 crc kubenswrapper[4703]: I0130 12:27:56.037578 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:27:56 crc kubenswrapper[4703]: E0130 12:27:56.037565 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:27:56 crc kubenswrapper[4703]: I0130 12:27:56.986379 4703 scope.go:117] "RemoveContainer" containerID="cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8" Jan 30 12:27:56 crc kubenswrapper[4703]: E0130 12:27:56.987700 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:27:59 crc kubenswrapper[4703]: I0130 12:27:59.051634 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-q9bcn"] Jan 30 12:27:59 crc kubenswrapper[4703]: I0130 12:27:59.064078 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-q9bcn"] Jan 30 12:27:59 crc kubenswrapper[4703]: I0130 12:27:59.101074 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="af9e761d-5f56-419b-8d45-0cadf63e675a" path="/var/lib/kubelet/pods/af9e761d-5f56-419b-8d45-0cadf63e675a/volumes" Jan 30 12:28:05 crc kubenswrapper[4703]: I0130 12:28:05.103392 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:28:05 crc kubenswrapper[4703]: E0130 12:28:05.122086 4703 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:28:12 crc kubenswrapper[4703]: I0130 12:28:12.087548 4703 scope.go:117] "RemoveContainer" containerID="cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8" Jan 30 12:28:12 crc kubenswrapper[4703]: E0130 12:28:12.088813 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:28:20 crc kubenswrapper[4703]: I0130 12:28:20.087175 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:28:20 crc kubenswrapper[4703]: E0130 12:28:20.088038 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:28:21 crc kubenswrapper[4703]: I0130 12:28:21.041352 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-654f-account-create-update-w7fdk"] Jan 30 12:28:21 crc kubenswrapper[4703]: I0130 12:28:21.052751 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-njb8f"] Jan 30 12:28:21 crc kubenswrapper[4703]: I0130 12:28:21.064667 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-6qgph"] Jan 30 12:28:21 crc kubenswrapper[4703]: I0130 12:28:21.079032 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-b4l7b"] Jan 30 12:28:21 crc kubenswrapper[4703]: I0130 12:28:21.100633 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-654f-account-create-update-w7fdk"] Jan 30 12:28:21 crc kubenswrapper[4703]: I0130 12:28:21.102376 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-njb8f"] Jan 30 12:28:21 crc kubenswrapper[4703]: I0130 12:28:21.112222 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-6qgph"] Jan 30 12:28:21 crc kubenswrapper[4703]: I0130 12:28:21.123241 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-9239-account-create-update-rd2pp"] Jan 30 12:28:21 crc kubenswrapper[4703]: I0130 12:28:21.149612 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-b4l7b"] Jan 30 12:28:21 crc kubenswrapper[4703]: I0130 12:28:21.160421 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-c990-account-create-update-hr75h"] Jan 30 12:28:21 crc kubenswrapper[4703]: I0130 12:28:21.172096 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-c990-account-create-update-hr75h"] Jan 30 12:28:21 crc kubenswrapper[4703]: I0130 
12:28:21.184661 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-9239-account-create-update-rd2pp"] Jan 30 12:28:23 crc kubenswrapper[4703]: I0130 12:28:23.101728 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23b1be5c-b917-4633-b207-9dfeb79ebadc" path="/var/lib/kubelet/pods/23b1be5c-b917-4633-b207-9dfeb79ebadc/volumes" Jan 30 12:28:23 crc kubenswrapper[4703]: I0130 12:28:23.103185 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24145d6b-2acd-413a-8305-a295030ebe1f" path="/var/lib/kubelet/pods/24145d6b-2acd-413a-8305-a295030ebe1f/volumes" Jan 30 12:28:23 crc kubenswrapper[4703]: I0130 12:28:23.103915 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f7deb02-fe54-403e-b67a-45e8d8e62a62" path="/var/lib/kubelet/pods/5f7deb02-fe54-403e-b67a-45e8d8e62a62/volumes" Jan 30 12:28:23 crc kubenswrapper[4703]: I0130 12:28:23.104916 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d79ec4e-4ab2-4be5-8682-ad1d8aeea327" path="/var/lib/kubelet/pods/6d79ec4e-4ab2-4be5-8682-ad1d8aeea327/volumes" Jan 30 12:28:23 crc kubenswrapper[4703]: I0130 12:28:23.105591 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4a552c6-d8b7-470e-aaae-01d7d29d9cb5" path="/var/lib/kubelet/pods/f4a552c6-d8b7-470e-aaae-01d7d29d9cb5/volumes" Jan 30 12:28:23 crc kubenswrapper[4703]: I0130 12:28:23.107956 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe0648b2-c4f9-47dd-bf18-e00e2958c243" path="/var/lib/kubelet/pods/fe0648b2-c4f9-47dd-bf18-e00e2958c243/volumes" Jan 30 12:28:26 crc kubenswrapper[4703]: I0130 12:28:26.087275 4703 scope.go:117] "RemoveContainer" containerID="cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8" Jan 30 12:28:26 crc kubenswrapper[4703]: E0130 12:28:26.088423 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:28:33 crc kubenswrapper[4703]: I0130 12:28:33.087313 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:28:33 crc kubenswrapper[4703]: E0130 12:28:33.088201 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:28:40 crc kubenswrapper[4703]: I0130 12:28:40.057199 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-hrpzm"] Jan 30 12:28:40 crc kubenswrapper[4703]: I0130 12:28:40.067373 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-hrpzm"] Jan 30 12:28:41 crc kubenswrapper[4703]: I0130 12:28:41.087240 4703 scope.go:117] "RemoveContainer" containerID="cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8" Jan 30 12:28:41 crc kubenswrapper[4703]: E0130 12:28:41.087641 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" 
for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:28:41 crc kubenswrapper[4703]: I0130 12:28:41.100016 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3da2d370-06f5-4fcc-b58e-2676657e6e85" path="/var/lib/kubelet/pods/3da2d370-06f5-4fcc-b58e-2676657e6e85/volumes" Jan 30 12:28:48 crc kubenswrapper[4703]: I0130 12:28:48.034324 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-75krj"] Jan 30 12:28:48 crc kubenswrapper[4703]: I0130 12:28:48.042530 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-75krj"] Jan 30 12:28:48 crc kubenswrapper[4703]: I0130 12:28:48.086431 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:28:48 crc kubenswrapper[4703]: E0130 12:28:48.086778 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:28:49 crc kubenswrapper[4703]: I0130 12:28:49.101167 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76" path="/var/lib/kubelet/pods/5af82bff-cbe3-4bc9-98cb-cd5b1cbe5f76/volumes" Jan 30 12:28:51 crc kubenswrapper[4703]: I0130 12:28:51.569178 4703 scope.go:117] "RemoveContainer" containerID="53abc2e3771959806dde05dfde05d58a401a7c5f4372f904e42e7e40cbd91918" Jan 30 12:28:51 crc kubenswrapper[4703]: I0130 12:28:51.601347 4703 scope.go:117] "RemoveContainer" containerID="70d535f221182fdf74ffd4bf95b25fa5d6ab150e020d06a40ae98e25bac59a9f" Jan 30 12:28:51 crc kubenswrapper[4703]: I0130 12:28:51.643565 4703 scope.go:117] "RemoveContainer" containerID="ac6c7fe606c25265e0a7610c2c63533975819202f2cfd4cf4cd4188f28779a0f" Jan 30 12:28:51 crc kubenswrapper[4703]: I0130 12:28:51.713980 4703 scope.go:117] "RemoveContainer" containerID="2683e841d3c3fb7cd7de6acfce779ba1263a4ee4512411ec956ea6c89217e13b" Jan 30 12:28:51 crc kubenswrapper[4703]: I0130 12:28:51.745058 4703 scope.go:117] "RemoveContainer" containerID="07f55484c80ec6722c54e982b05d5092192569f8054db9191c386c9b00493b03" Jan 30 12:28:51 crc kubenswrapper[4703]: I0130 12:28:51.789241 4703 scope.go:117] "RemoveContainer" containerID="fb86633033de415e0881625c9d147999160c01808552e9ddb659a3d7908a03a8" Jan 30 12:28:51 crc kubenswrapper[4703]: I0130 12:28:51.816500 4703 scope.go:117] "RemoveContainer" containerID="af5700bcd05249811d07e7aa4a70ed9eb251484b5acb22fd8ec6461ec3009b1b" Jan 30 12:28:51 crc kubenswrapper[4703]: I0130 12:28:51.870763 4703 scope.go:117] "RemoveContainer" containerID="652385ce09399001b04ffc1823d82d1425e460e9ca7665e4b2b3e4d3e61115a6" Jan 30 12:28:51 crc kubenswrapper[4703]: I0130 12:28:51.923105 4703 scope.go:117] "RemoveContainer" containerID="19f2d4d421c4124ccfcc7c381bcca94056f61f6d818cb1b84c1eaba6271260ea" Jan 30 12:28:51 crc kubenswrapper[4703]: I0130 12:28:51.947420 4703 scope.go:117] "RemoveContainer" containerID="b9ee7795f51ad91aa101278413e3e09d2b7e685ca655ad1c5f939e17faffef58" 
Jan 30 12:28:51 crc kubenswrapper[4703]: I0130 12:28:51.984580 4703 scope.go:117] "RemoveContainer" containerID="88d3059a11cfb7baa4ed49fa8bf0653c640cae343f25dd88df4bd6983d6352e3" Jan 30 12:28:52 crc kubenswrapper[4703]: I0130 12:28:52.010491 4703 scope.go:117] "RemoveContainer" containerID="b39cce2491b0c477dd40fa0817ad89e87c53df3a8f93e40dddeac51360bf2021" Jan 30 12:28:52 crc kubenswrapper[4703]: I0130 12:28:52.041954 4703 scope.go:117] "RemoveContainer" containerID="8535c220de9bfab371dcefdd5a7443678968d6e1ee5ce77af1678101a3bc6f5c" Jan 30 12:28:53 crc kubenswrapper[4703]: I0130 12:28:53.086621 4703 scope.go:117] "RemoveContainer" containerID="cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8" Jan 30 12:28:53 crc kubenswrapper[4703]: E0130 12:28:53.087474 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:29:02 crc kubenswrapper[4703]: I0130 12:29:02.086593 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:29:02 crc kubenswrapper[4703]: E0130 12:29:02.089566 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:29:08 crc kubenswrapper[4703]: I0130 12:29:08.087092 4703 scope.go:117] "RemoveContainer" containerID="cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8" Jan 30 12:29:08 crc kubenswrapper[4703]: E0130 12:29:08.088469 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:29:13 crc kubenswrapper[4703]: I0130 12:29:13.066028 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/watcher-db-sync-vhshs"] Jan 30 12:29:13 crc kubenswrapper[4703]: I0130 12:29:13.076577 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/watcher-db-sync-vhshs"] Jan 30 12:29:13 crc kubenswrapper[4703]: I0130 12:29:13.087938 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:29:13 crc kubenswrapper[4703]: E0130 12:29:13.088325 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:29:13 crc kubenswrapper[4703]: I0130 12:29:13.100430 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="cd3bf27b-46bc-468e-8735-3a3ed6eda272" path="/var/lib/kubelet/pods/cd3bf27b-46bc-468e-8735-3a3ed6eda272/volumes" Jan 30 12:29:22 crc kubenswrapper[4703]: I0130 12:29:22.087635 4703 scope.go:117] "RemoveContainer" containerID="cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8" Jan 30 12:29:22 crc kubenswrapper[4703]: E0130 12:29:22.088984 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:29:26 crc kubenswrapper[4703]: I0130 12:29:26.086925 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:29:26 crc kubenswrapper[4703]: E0130 12:29:26.087677 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:29:34 crc kubenswrapper[4703]: I0130 12:29:34.087168 4703 scope.go:117] "RemoveContainer" containerID="cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8" Jan 30 12:29:34 crc kubenswrapper[4703]: E0130 12:29:34.087900 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:29:40 crc kubenswrapper[4703]: I0130 12:29:40.087275 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:29:40 crc kubenswrapper[4703]: E0130 12:29:40.088030 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:29:48 crc kubenswrapper[4703]: I0130 12:29:48.086929 4703 scope.go:117] "RemoveContainer" containerID="cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8" Jan 30 12:29:48 crc kubenswrapper[4703]: E0130 12:29:48.088211 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:29:49 crc kubenswrapper[4703]: I0130 12:29:49.101863 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-95pfb"] Jan 30 12:29:49 crc kubenswrapper[4703]: I0130 12:29:49.102025 4703 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openstack/neutron-db-sync-95pfb"] Jan 30 12:29:51 crc kubenswrapper[4703]: I0130 12:29:51.099692 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd8fdb9b-147d-4634-a312-719f1c62c7ff" path="/var/lib/kubelet/pods/bd8fdb9b-147d-4634-a312-719f1c62c7ff/volumes" Jan 30 12:29:52 crc kubenswrapper[4703]: I0130 12:29:52.301807 4703 scope.go:117] "RemoveContainer" containerID="7776806fdae7c2480f7db42443abf5deec35b5bf8601c499efad9a1a1f1a81d9" Jan 30 12:29:52 crc kubenswrapper[4703]: I0130 12:29:52.331601 4703 scope.go:117] "RemoveContainer" containerID="6dcbea808c1e6ab36bb68a56e173af69dedb9e73ac24c2601a2deb7572065251" Jan 30 12:29:52 crc kubenswrapper[4703]: I0130 12:29:52.426360 4703 scope.go:117] "RemoveContainer" containerID="1cf4f25cf82f53ff27b32327a97dd0d804b3013f6aecfdf31873c1098401e568" Jan 30 12:29:52 crc kubenswrapper[4703]: I0130 12:29:52.481065 4703 scope.go:117] "RemoveContainer" containerID="f96b4388e40767582588b210e283c06412984bc5aaac06d7107321a9a84ef6d7" Jan 30 12:29:55 crc kubenswrapper[4703]: I0130 12:29:55.097805 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127" Jan 30 12:29:55 crc kubenswrapper[4703]: I0130 12:29:55.522773 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerStarted","Data":"5fa5c313f1cbb35a96c31064d256324b86779414c02a6d827c9dc4a8b556105a"} Jan 30 12:29:59 crc kubenswrapper[4703]: I0130 12:29:59.046991 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-8nnnq"] Jan 30 12:29:59 crc kubenswrapper[4703]: I0130 12:29:59.059935 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-8nnnq"] Jan 30 12:29:59 crc kubenswrapper[4703]: I0130 12:29:59.099611 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="147c2570-9be4-4fb1-9789-f7bc204119db" path="/var/lib/kubelet/pods/147c2570-9be4-4fb1-9789-f7bc204119db/volumes" Jan 30 12:30:00 crc kubenswrapper[4703]: I0130 12:30:00.087482 4703 scope.go:117] "RemoveContainer" containerID="cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8" Jan 30 12:30:00 crc kubenswrapper[4703]: E0130 12:30:00.088335 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:30:00 crc kubenswrapper[4703]: I0130 12:30:00.156364 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr"] Jan 30 12:30:00 crc kubenswrapper[4703]: I0130 12:30:00.158170 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr" Jan 30 12:30:00 crc kubenswrapper[4703]: I0130 12:30:00.169242 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 30 12:30:00 crc kubenswrapper[4703]: I0130 12:30:00.172410 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 30 12:30:00 crc kubenswrapper[4703]: I0130 12:30:00.174075 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr"] Jan 30 12:30:00 crc kubenswrapper[4703]: I0130 12:30:00.278862 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/81998cd1-defd-41b1-834e-20b5f3102aed-config-volume\") pod \"collect-profiles-29496270-bfxpr\" (UID: \"81998cd1-defd-41b1-834e-20b5f3102aed\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr" Jan 30 12:30:00 crc kubenswrapper[4703]: I0130 12:30:00.279078 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7lz8\" (UniqueName: \"kubernetes.io/projected/81998cd1-defd-41b1-834e-20b5f3102aed-kube-api-access-l7lz8\") pod \"collect-profiles-29496270-bfxpr\" (UID: \"81998cd1-defd-41b1-834e-20b5f3102aed\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr" Jan 30 12:30:00 crc kubenswrapper[4703]: I0130 12:30:00.279207 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/81998cd1-defd-41b1-834e-20b5f3102aed-secret-volume\") pod \"collect-profiles-29496270-bfxpr\" (UID: \"81998cd1-defd-41b1-834e-20b5f3102aed\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr" Jan 30 12:30:00 crc kubenswrapper[4703]: I0130 12:30:00.381385 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7lz8\" (UniqueName: \"kubernetes.io/projected/81998cd1-defd-41b1-834e-20b5f3102aed-kube-api-access-l7lz8\") pod \"collect-profiles-29496270-bfxpr\" (UID: \"81998cd1-defd-41b1-834e-20b5f3102aed\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr" Jan 30 12:30:00 crc kubenswrapper[4703]: I0130 12:30:00.381580 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/81998cd1-defd-41b1-834e-20b5f3102aed-secret-volume\") pod \"collect-profiles-29496270-bfxpr\" (UID: \"81998cd1-defd-41b1-834e-20b5f3102aed\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr" Jan 30 12:30:00 crc kubenswrapper[4703]: I0130 12:30:00.381937 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/81998cd1-defd-41b1-834e-20b5f3102aed-config-volume\") pod \"collect-profiles-29496270-bfxpr\" (UID: \"81998cd1-defd-41b1-834e-20b5f3102aed\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr" Jan 30 12:30:00 crc kubenswrapper[4703]: I0130 12:30:00.383440 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/81998cd1-defd-41b1-834e-20b5f3102aed-config-volume\") pod 
\"collect-profiles-29496270-bfxpr\" (UID: \"81998cd1-defd-41b1-834e-20b5f3102aed\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr" Jan 30 12:30:00 crc kubenswrapper[4703]: I0130 12:30:00.400253 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/81998cd1-defd-41b1-834e-20b5f3102aed-secret-volume\") pod \"collect-profiles-29496270-bfxpr\" (UID: \"81998cd1-defd-41b1-834e-20b5f3102aed\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr" Jan 30 12:30:00 crc kubenswrapper[4703]: I0130 12:30:00.408386 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7lz8\" (UniqueName: \"kubernetes.io/projected/81998cd1-defd-41b1-834e-20b5f3102aed-kube-api-access-l7lz8\") pod \"collect-profiles-29496270-bfxpr\" (UID: \"81998cd1-defd-41b1-834e-20b5f3102aed\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr" Jan 30 12:30:00 crc kubenswrapper[4703]: I0130 12:30:00.489480 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr" Jan 30 12:30:01 crc kubenswrapper[4703]: I0130 12:30:01.013957 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr"] Jan 30 12:30:01 crc kubenswrapper[4703]: I0130 12:30:01.601285 4703 generic.go:334] "Generic (PLEG): container finished" podID="81998cd1-defd-41b1-834e-20b5f3102aed" containerID="4b643bfab32726688ea684fd55232650d9d804279126e56467d74c619e844afc" exitCode=0 Jan 30 12:30:01 crc kubenswrapper[4703]: I0130 12:30:01.601590 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr" event={"ID":"81998cd1-defd-41b1-834e-20b5f3102aed","Type":"ContainerDied","Data":"4b643bfab32726688ea684fd55232650d9d804279126e56467d74c619e844afc"} Jan 30 12:30:01 crc kubenswrapper[4703]: I0130 12:30:01.602192 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr" event={"ID":"81998cd1-defd-41b1-834e-20b5f3102aed","Type":"ContainerStarted","Data":"ab95a636d64b3a5fdb779f642b24a641e8ac4381757e737990f01743295b6e8b"} Jan 30 12:30:02 crc kubenswrapper[4703]: I0130 12:30:02.967101 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr" Jan 30 12:30:03 crc kubenswrapper[4703]: I0130 12:30:03.052679 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l7lz8\" (UniqueName: \"kubernetes.io/projected/81998cd1-defd-41b1-834e-20b5f3102aed-kube-api-access-l7lz8\") pod \"81998cd1-defd-41b1-834e-20b5f3102aed\" (UID: \"81998cd1-defd-41b1-834e-20b5f3102aed\") " Jan 30 12:30:03 crc kubenswrapper[4703]: I0130 12:30:03.052781 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/81998cd1-defd-41b1-834e-20b5f3102aed-secret-volume\") pod \"81998cd1-defd-41b1-834e-20b5f3102aed\" (UID: \"81998cd1-defd-41b1-834e-20b5f3102aed\") " Jan 30 12:30:03 crc kubenswrapper[4703]: I0130 12:30:03.052916 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/81998cd1-defd-41b1-834e-20b5f3102aed-config-volume\") pod \"81998cd1-defd-41b1-834e-20b5f3102aed\" (UID: \"81998cd1-defd-41b1-834e-20b5f3102aed\") " Jan 30 12:30:03 crc kubenswrapper[4703]: I0130 12:30:03.054758 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81998cd1-defd-41b1-834e-20b5f3102aed-config-volume" (OuterVolumeSpecName: "config-volume") pod "81998cd1-defd-41b1-834e-20b5f3102aed" (UID: "81998cd1-defd-41b1-834e-20b5f3102aed"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:30:03 crc kubenswrapper[4703]: I0130 12:30:03.063410 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81998cd1-defd-41b1-834e-20b5f3102aed-kube-api-access-l7lz8" (OuterVolumeSpecName: "kube-api-access-l7lz8") pod "81998cd1-defd-41b1-834e-20b5f3102aed" (UID: "81998cd1-defd-41b1-834e-20b5f3102aed"). InnerVolumeSpecName "kube-api-access-l7lz8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:30:03 crc kubenswrapper[4703]: I0130 12:30:03.067451 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81998cd1-defd-41b1-834e-20b5f3102aed-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "81998cd1-defd-41b1-834e-20b5f3102aed" (UID: "81998cd1-defd-41b1-834e-20b5f3102aed"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:30:03 crc kubenswrapper[4703]: I0130 12:30:03.156606 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l7lz8\" (UniqueName: \"kubernetes.io/projected/81998cd1-defd-41b1-834e-20b5f3102aed-kube-api-access-l7lz8\") on node \"crc\" DevicePath \"\"" Jan 30 12:30:03 crc kubenswrapper[4703]: I0130 12:30:03.156659 4703 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/81998cd1-defd-41b1-834e-20b5f3102aed-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 30 12:30:03 crc kubenswrapper[4703]: I0130 12:30:03.156671 4703 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/81998cd1-defd-41b1-834e-20b5f3102aed-config-volume\") on node \"crc\" DevicePath \"\"" Jan 30 12:30:03 crc kubenswrapper[4703]: I0130 12:30:03.625314 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr" event={"ID":"81998cd1-defd-41b1-834e-20b5f3102aed","Type":"ContainerDied","Data":"ab95a636d64b3a5fdb779f642b24a641e8ac4381757e737990f01743295b6e8b"} Jan 30 12:30:03 crc kubenswrapper[4703]: I0130 12:30:03.625382 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ab95a636d64b3a5fdb779f642b24a641e8ac4381757e737990f01743295b6e8b" Jan 30 12:30:03 crc kubenswrapper[4703]: I0130 12:30:03.625398 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496270-bfxpr" Jan 30 12:30:04 crc kubenswrapper[4703]: I0130 12:30:04.051511 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786"] Jan 30 12:30:04 crc kubenswrapper[4703]: I0130 12:30:04.060553 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496225-zh786"] Jan 30 12:30:05 crc kubenswrapper[4703]: I0130 12:30:05.108364 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2eaf3e49-2394-495c-926e-5504ff81ccc5" path="/var/lib/kubelet/pods/2eaf3e49-2394-495c-926e-5504ff81ccc5/volumes" Jan 30 12:30:15 crc kubenswrapper[4703]: I0130 12:30:15.095766 4703 scope.go:117] "RemoveContainer" containerID="cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8" Jan 30 12:30:15 crc kubenswrapper[4703]: E0130 12:30:15.097259 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:30:15 crc kubenswrapper[4703]: I0130 12:30:15.786189 4703 generic.go:334] "Generic (PLEG): container finished" podID="d7bb23ae-c393-4b73-a856-b61d160d513d" containerID="9585fc35f92406504ff487d3c51fe659281d60ce77c9e2b8ce234ff45f244224" exitCode=0 Jan 30 12:30:15 crc kubenswrapper[4703]: I0130 12:30:15.786240 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" event={"ID":"d7bb23ae-c393-4b73-a856-b61d160d513d","Type":"ContainerDied","Data":"9585fc35f92406504ff487d3c51fe659281d60ce77c9e2b8ce234ff45f244224"} Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.299275 4703 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.425134 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7bb23ae-c393-4b73-a856-b61d160d513d-bootstrap-combined-ca-bundle\") pod \"d7bb23ae-c393-4b73-a856-b61d160d513d\" (UID: \"d7bb23ae-c393-4b73-a856-b61d160d513d\") " Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.425362 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d7bb23ae-c393-4b73-a856-b61d160d513d-ssh-key-openstack-edpm-ipam\") pod \"d7bb23ae-c393-4b73-a856-b61d160d513d\" (UID: \"d7bb23ae-c393-4b73-a856-b61d160d513d\") " Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.425488 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d7bb23ae-c393-4b73-a856-b61d160d513d-inventory\") pod \"d7bb23ae-c393-4b73-a856-b61d160d513d\" (UID: \"d7bb23ae-c393-4b73-a856-b61d160d513d\") " Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.425633 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xnh4v\" (UniqueName: \"kubernetes.io/projected/d7bb23ae-c393-4b73-a856-b61d160d513d-kube-api-access-xnh4v\") pod \"d7bb23ae-c393-4b73-a856-b61d160d513d\" (UID: \"d7bb23ae-c393-4b73-a856-b61d160d513d\") " Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.436371 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7bb23ae-c393-4b73-a856-b61d160d513d-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "d7bb23ae-c393-4b73-a856-b61d160d513d" (UID: "d7bb23ae-c393-4b73-a856-b61d160d513d"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.436560 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7bb23ae-c393-4b73-a856-b61d160d513d-kube-api-access-xnh4v" (OuterVolumeSpecName: "kube-api-access-xnh4v") pod "d7bb23ae-c393-4b73-a856-b61d160d513d" (UID: "d7bb23ae-c393-4b73-a856-b61d160d513d"). InnerVolumeSpecName "kube-api-access-xnh4v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.464500 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7bb23ae-c393-4b73-a856-b61d160d513d-inventory" (OuterVolumeSpecName: "inventory") pod "d7bb23ae-c393-4b73-a856-b61d160d513d" (UID: "d7bb23ae-c393-4b73-a856-b61d160d513d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.470414 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7bb23ae-c393-4b73-a856-b61d160d513d-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "d7bb23ae-c393-4b73-a856-b61d160d513d" (UID: "d7bb23ae-c393-4b73-a856-b61d160d513d"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.530724 4703 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7bb23ae-c393-4b73-a856-b61d160d513d-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.530774 4703 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/d7bb23ae-c393-4b73-a856-b61d160d513d-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.530787 4703 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d7bb23ae-c393-4b73-a856-b61d160d513d-inventory\") on node \"crc\" DevicePath \"\"" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.530798 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xnh4v\" (UniqueName: \"kubernetes.io/projected/d7bb23ae-c393-4b73-a856-b61d160d513d-kube-api-access-xnh4v\") on node \"crc\" DevicePath \"\"" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.813612 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" event={"ID":"d7bb23ae-c393-4b73-a856-b61d160d513d","Type":"ContainerDied","Data":"33e89bf64304a9cbb29e1747f42a6fa0e0e20edb5b5dff1921fa80a0bbafafa7"} Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.813681 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="33e89bf64304a9cbb29e1747f42a6fa0e0e20edb5b5dff1921fa80a0bbafafa7" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.813729 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.960490 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn"] Jan 30 12:30:17 crc kubenswrapper[4703]: E0130 12:30:17.961308 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7bb23ae-c393-4b73-a856-b61d160d513d" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.961347 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7bb23ae-c393-4b73-a856-b61d160d513d" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 30 12:30:17 crc kubenswrapper[4703]: E0130 12:30:17.961388 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81998cd1-defd-41b1-834e-20b5f3102aed" containerName="collect-profiles" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.961396 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="81998cd1-defd-41b1-834e-20b5f3102aed" containerName="collect-profiles" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.961783 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7bb23ae-c393-4b73-a856-b61d160d513d" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.961820 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="81998cd1-defd-41b1-834e-20b5f3102aed" containerName="collect-profiles" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.963336 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.966248 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.967873 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.967900 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.968009 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-jjdl5" Jan 30 12:30:17 crc kubenswrapper[4703]: I0130 12:30:17.979515 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn"] Jan 30 12:30:18 crc kubenswrapper[4703]: I0130 12:30:18.051839 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0374bdb-2201-41a5-90ff-2185eac3add1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vmszn\" (UID: \"e0374bdb-2201-41a5-90ff-2185eac3add1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn" Jan 30 12:30:18 crc kubenswrapper[4703]: I0130 12:30:18.051941 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tcl6\" (UniqueName: \"kubernetes.io/projected/e0374bdb-2201-41a5-90ff-2185eac3add1-kube-api-access-7tcl6\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vmszn\" (UID: \"e0374bdb-2201-41a5-90ff-2185eac3add1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn" Jan 30 12:30:18 crc kubenswrapper[4703]: I0130 12:30:18.051999 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e0374bdb-2201-41a5-90ff-2185eac3add1-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vmszn\" (UID: \"e0374bdb-2201-41a5-90ff-2185eac3add1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn" Jan 30 12:30:18 crc kubenswrapper[4703]: I0130 12:30:18.156335 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0374bdb-2201-41a5-90ff-2185eac3add1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vmszn\" (UID: \"e0374bdb-2201-41a5-90ff-2185eac3add1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn" Jan 30 12:30:18 crc kubenswrapper[4703]: I0130 12:30:18.156601 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tcl6\" (UniqueName: \"kubernetes.io/projected/e0374bdb-2201-41a5-90ff-2185eac3add1-kube-api-access-7tcl6\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vmszn\" (UID: \"e0374bdb-2201-41a5-90ff-2185eac3add1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn" Jan 30 12:30:18 crc kubenswrapper[4703]: I0130 12:30:18.156629 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/e0374bdb-2201-41a5-90ff-2185eac3add1-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vmszn\" (UID: \"e0374bdb-2201-41a5-90ff-2185eac3add1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn" Jan 30 12:30:18 crc kubenswrapper[4703]: I0130 12:30:18.162721 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0374bdb-2201-41a5-90ff-2185eac3add1-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vmszn\" (UID: \"e0374bdb-2201-41a5-90ff-2185eac3add1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn" Jan 30 12:30:18 crc kubenswrapper[4703]: I0130 12:30:18.162929 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e0374bdb-2201-41a5-90ff-2185eac3add1-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vmszn\" (UID: \"e0374bdb-2201-41a5-90ff-2185eac3add1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn" Jan 30 12:30:18 crc kubenswrapper[4703]: I0130 12:30:18.177487 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tcl6\" (UniqueName: \"kubernetes.io/projected/e0374bdb-2201-41a5-90ff-2185eac3add1-kube-api-access-7tcl6\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-vmszn\" (UID: \"e0374bdb-2201-41a5-90ff-2185eac3add1\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn" Jan 30 12:30:18 crc kubenswrapper[4703]: I0130 12:30:18.290770 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn" Jan 30 12:30:18 crc kubenswrapper[4703]: I0130 12:30:18.946445 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn"] Jan 30 12:30:19 crc kubenswrapper[4703]: I0130 12:30:19.841320 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn" event={"ID":"e0374bdb-2201-41a5-90ff-2185eac3add1","Type":"ContainerStarted","Data":"9ef70cc0c091d0ddab52723883514e48c21f1eec5a57d69b082afc0f840d8c2e"} Jan 30 12:30:19 crc kubenswrapper[4703]: I0130 12:30:19.841742 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn" event={"ID":"e0374bdb-2201-41a5-90ff-2185eac3add1","Type":"ContainerStarted","Data":"15f07c38f630bfbe14bf3b47c57108196be008642abf1c8c4e9ce377ae74d9d0"} Jan 30 12:30:19 crc kubenswrapper[4703]: I0130 12:30:19.878239 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn" podStartSLOduration=2.458955281 podStartE2EDuration="2.878220645s" podCreationTimestamp="2026-01-30 12:30:17 +0000 UTC" firstStartedPulling="2026-01-30 12:30:18.957104733 +0000 UTC m=+2054.734926387" lastFinishedPulling="2026-01-30 12:30:19.376370097 +0000 UTC m=+2055.154191751" observedRunningTime="2026-01-30 12:30:19.874986369 +0000 UTC m=+2055.652808023" watchObservedRunningTime="2026-01-30 12:30:19.878220645 +0000 UTC m=+2055.656042299" Jan 30 12:30:21 crc kubenswrapper[4703]: I0130 12:30:21.043963 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-mmqwb"] Jan 30 12:30:21 crc 
kubenswrapper[4703]: I0130 12:30:21.056511 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-mmqwb"] Jan 30 12:30:21 crc kubenswrapper[4703]: I0130 12:30:21.099066 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="273a2195-27a6-4229-b3c0-8c67d9fc4302" path="/var/lib/kubelet/pods/273a2195-27a6-4229-b3c0-8c67d9fc4302/volumes" Jan 30 12:30:26 crc kubenswrapper[4703]: I0130 12:30:26.086447 4703 scope.go:117] "RemoveContainer" containerID="cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8" Jan 30 12:30:26 crc kubenswrapper[4703]: E0130 12:30:26.087284 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:30:37 crc kubenswrapper[4703]: I0130 12:30:37.086825 4703 scope.go:117] "RemoveContainer" containerID="cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8" Jan 30 12:30:38 crc kubenswrapper[4703]: I0130 12:30:38.040576 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerStarted","Data":"1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834"} Jan 30 12:30:41 crc kubenswrapper[4703]: I0130 12:30:41.034501 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 30 12:30:41 crc kubenswrapper[4703]: I0130 12:30:41.074366 4703 generic.go:334] "Generic (PLEG): container finished" podID="2fc19a6b-3cde-4bb5-9499-f5be846289da" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834" exitCode=1 Jan 30 12:30:41 crc kubenswrapper[4703]: I0130 12:30:41.074440 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerDied","Data":"1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834"} Jan 30 12:30:41 crc kubenswrapper[4703]: I0130 12:30:41.074523 4703 scope.go:117] "RemoveContainer" containerID="cf74f1c120a4fed22b346bbec6eadca8a7fe15484389d0a9ef8e0fa79956a4d8" Jan 30 12:30:41 crc kubenswrapper[4703]: I0130 12:30:41.075917 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834" Jan 30 12:30:41 crc kubenswrapper[4703]: E0130 12:30:41.076487 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:30:46 crc kubenswrapper[4703]: I0130 12:30:46.035296 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:30:46 crc kubenswrapper[4703]: I0130 12:30:46.036319 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:30:46 crc kubenswrapper[4703]: I0130 12:30:46.036336 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:30:46 crc kubenswrapper[4703]: I0130 
12:30:46.037553 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834"
Jan 30 12:30:46 crc kubenswrapper[4703]: E0130 12:30:46.037850 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:30:50 crc kubenswrapper[4703]: I0130 12:30:50.844617 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dfhth"]
Jan 30 12:30:50 crc kubenswrapper[4703]: I0130 12:30:50.849917 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dfhth"
Jan 30 12:30:50 crc kubenswrapper[4703]: I0130 12:30:50.860350 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dfhth"]
Jan 30 12:30:50 crc kubenswrapper[4703]: I0130 12:30:50.883485 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scgw6\" (UniqueName: \"kubernetes.io/projected/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2-kube-api-access-scgw6\") pod \"redhat-operators-dfhth\" (UID: \"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2\") " pod="openshift-marketplace/redhat-operators-dfhth"
Jan 30 12:30:50 crc kubenswrapper[4703]: I0130 12:30:50.883595 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2-catalog-content\") pod \"redhat-operators-dfhth\" (UID: \"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2\") " pod="openshift-marketplace/redhat-operators-dfhth"
Jan 30 12:30:50 crc kubenswrapper[4703]: I0130 12:30:50.883723 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2-utilities\") pod \"redhat-operators-dfhth\" (UID: \"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2\") " pod="openshift-marketplace/redhat-operators-dfhth"
Jan 30 12:30:50 crc kubenswrapper[4703]: I0130 12:30:50.987107 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2-utilities\") pod \"redhat-operators-dfhth\" (UID: \"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2\") " pod="openshift-marketplace/redhat-operators-dfhth"
Jan 30 12:30:50 crc kubenswrapper[4703]: I0130 12:30:50.987227 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scgw6\" (UniqueName: \"kubernetes.io/projected/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2-kube-api-access-scgw6\") pod \"redhat-operators-dfhth\" (UID: \"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2\") " pod="openshift-marketplace/redhat-operators-dfhth"
Jan 30 12:30:50 crc kubenswrapper[4703]: I0130 12:30:50.987317 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2-catalog-content\") pod \"redhat-operators-dfhth\" (UID: \"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2\") " pod="openshift-marketplace/redhat-operators-dfhth"
Jan 30 12:30:50 crc kubenswrapper[4703]: I0130 12:30:50.987851 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2-utilities\") pod \"redhat-operators-dfhth\" (UID: \"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2\") " pod="openshift-marketplace/redhat-operators-dfhth"
Jan 30 12:30:50 crc kubenswrapper[4703]: I0130 12:30:50.987957 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2-catalog-content\") pod \"redhat-operators-dfhth\" (UID: \"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2\") " pod="openshift-marketplace/redhat-operators-dfhth"
Jan 30 12:30:51 crc kubenswrapper[4703]: I0130 12:30:51.013232 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scgw6\" (UniqueName: \"kubernetes.io/projected/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2-kube-api-access-scgw6\") pod \"redhat-operators-dfhth\" (UID: \"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2\") " pod="openshift-marketplace/redhat-operators-dfhth"
Jan 30 12:30:51 crc kubenswrapper[4703]: I0130 12:30:51.183835 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dfhth"
Jan 30 12:30:51 crc kubenswrapper[4703]: I0130 12:30:51.721396 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dfhth"]
Jan 30 12:30:52 crc kubenswrapper[4703]: I0130 12:30:52.205813 4703 generic.go:334] "Generic (PLEG): container finished" podID="a7d20c26-413b-4b4d-8c28-bbe10d95b5d2" containerID="c3dd51d48eb52b4be0c7b986a8908a1d453128f32dae04ab4d9bddd00ef86ee1" exitCode=0
Jan 30 12:30:52 crc kubenswrapper[4703]: I0130 12:30:52.205876 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dfhth" event={"ID":"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2","Type":"ContainerDied","Data":"c3dd51d48eb52b4be0c7b986a8908a1d453128f32dae04ab4d9bddd00ef86ee1"}
Jan 30 12:30:52 crc kubenswrapper[4703]: I0130 12:30:52.206203 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dfhth" event={"ID":"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2","Type":"ContainerStarted","Data":"9199d2b2f0d490341ae760e0ae6b508f40d2cd6303b5033be734596047906b4f"}
Jan 30 12:30:52 crc kubenswrapper[4703]: I0130 12:30:52.591606 4703 scope.go:117] "RemoveContainer" containerID="854d15c00e6754d489a505fd2e9138494328c64efebf46f08e1c4a3040ebbed5"
Jan 30 12:30:52 crc kubenswrapper[4703]: I0130 12:30:52.626493 4703 scope.go:117] "RemoveContainer" containerID="3a38b773280de96cef8739a5579de3e9421f01c64a9e3befcc4c24a982f1eead"
Jan 30 12:30:52 crc kubenswrapper[4703]: I0130 12:30:52.692403 4703 scope.go:117] "RemoveContainer" containerID="b8cecdf7f5f56768b007344414621396ae5457fcf96b89f2d2cbae7fa9ba8cdb"
Jan 30 12:30:54 crc kubenswrapper[4703]: I0130 12:30:54.266542 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dfhth" event={"ID":"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2","Type":"ContainerStarted","Data":"12b8921b9cc2fb754a2f95ed2a8c33e69cd99cfd5a6629465b7a317857a0889d"}
Jan 30 12:30:56 crc kubenswrapper[4703]: I0130 12:30:56.290639 4703 generic.go:334] "Generic (PLEG): container finished" podID="a7d20c26-413b-4b4d-8c28-bbe10d95b5d2" containerID="12b8921b9cc2fb754a2f95ed2a8c33e69cd99cfd5a6629465b7a317857a0889d" exitCode=0
Jan 30 12:30:56 crc kubenswrapper[4703]: I0130 12:30:56.290703 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dfhth" event={"ID":"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2","Type":"ContainerDied","Data":"12b8921b9cc2fb754a2f95ed2a8c33e69cd99cfd5a6629465b7a317857a0889d"}
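Note: the VerifyControllerAttachedVolume / MountVolume / MountVolume.SetUp triplets above are the kubelet's volume manager reconciling desired state (volumes the pod spec wants) against actual state (what is mounted on the node). A minimal sketch of that reconcile pattern, with hypothetical types rather than the kubelet's real interfaces:

    // Sketch of a desired-vs-actual volume reconcile loop, in the spirit of
    // reconciler_common.go. All types here are illustrative simplifications.
    package main

    import "fmt"

    type volume struct{ name, plugin string }

    func reconcile(desired []volume, mounted map[string]bool) {
        for _, v := range desired {
            if mounted[v.name] {
                continue // already in the actual state of the world
            }
            // Phase 1: confirm the volume is attached (a no-op for empty-dir/projected).
            fmt.Printf("VerifyControllerAttachedVolume started for volume %q (%s)\n", v.name, v.plugin)
            // Phase 2: mount and set up; on success it joins the actual state.
            fmt.Printf("MountVolume started for volume %q\n", v.name)
            mounted[v.name] = true
            fmt.Printf("MountVolume.SetUp succeeded for volume %q\n", v.name)
        }
    }

    func main() {
        desired := []volume{
            {"kube-api-access-scgw6", "kubernetes.io/projected"},
            {"catalog-content", "kubernetes.io/empty-dir"},
            {"utilities", "kubernetes.io/empty-dir"},
        }
        reconcile(desired, map[string]bool{})
    }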
Jan 30 12:30:57 crc kubenswrapper[4703]: I0130 12:30:57.304176 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dfhth" event={"ID":"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2","Type":"ContainerStarted","Data":"b885c7f47ae60761b3e58055bb964a8e8785c83268cf4fc1f3be5bda1899d344"}
Jan 30 12:30:57 crc kubenswrapper[4703]: I0130 12:30:57.336692 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dfhth" podStartSLOduration=2.537563296 podStartE2EDuration="7.33666724s" podCreationTimestamp="2026-01-30 12:30:50 +0000 UTC" firstStartedPulling="2026-01-30 12:30:52.20808778 +0000 UTC m=+2087.985909434" lastFinishedPulling="2026-01-30 12:30:57.007191724 +0000 UTC m=+2092.785013378" observedRunningTime="2026-01-30 12:30:57.324878645 +0000 UTC m=+2093.102700319" watchObservedRunningTime="2026-01-30 12:30:57.33666724 +0000 UTC m=+2093.114488894"
Jan 30 12:30:58 crc kubenswrapper[4703]: I0130 12:30:58.086407 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834"
Jan 30 12:30:58 crc kubenswrapper[4703]: E0130 12:30:58.086758 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:31:00 crc kubenswrapper[4703]: I0130 12:31:00.051503 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-vkzk9"]
Jan 30 12:31:00 crc kubenswrapper[4703]: I0130 12:31:00.060707 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-vkzk9"]
Jan 30 12:31:01 crc kubenswrapper[4703]: I0130 12:31:01.100159 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70" path="/var/lib/kubelet/pods/b41b5fbf-19a1-48ce-8eaf-a6f2ae179e70/volumes"
Jan 30 12:31:01 crc kubenswrapper[4703]: I0130 12:31:01.184789 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dfhth"
Jan 30 12:31:01 crc kubenswrapper[4703]: I0130 12:31:01.184869 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dfhth"
Jan 30 12:31:02 crc kubenswrapper[4703]: I0130 12:31:02.043712 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-scrgs"]
Jan 30 12:31:02 crc kubenswrapper[4703]: I0130 12:31:02.056181 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-scrgs"]
Jan 30 12:31:02 crc kubenswrapper[4703]: I0130 12:31:02.241400 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dfhth" podUID="a7d20c26-413b-4b4d-8c28-bbe10d95b5d2" containerName="registry-server" probeResult="failure" output=<
Jan 30 12:31:02 crc kubenswrapper[4703]: timeout: failed to connect service ":50051" within 1s
Jan 30 12:31:02 crc kubenswrapper[4703]: >
Jan 30 12:31:03 crc kubenswrapper[4703]: I0130 12:31:03.104499 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1665113c-fcaa-4a13-9de2-552579864e44" path="/var/lib/kubelet/pods/1665113c-fcaa-4a13-9de2-552579864e44/volumes"
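Note: the pod_startup_latency_tracker entry above is internally consistent, on the reading that the SLO duration is the end-to-end duration minus image-pull time. A small check of the arithmetic (timestamps reduced to seconds within the 12:30 minute):

    package main

    import "fmt"

    func main() {
        // Values taken from the "Observed pod startup duration" entry above.
        created := 50.0             // podCreationTimestamp 12:30:50
        firstPull := 52.20808778    // firstStartedPulling
        lastPull := 57.007191724    // lastFinishedPulling
        observed := 57.33666724     // watchObservedRunningTime

        e2e := observed - created           // 7.33666724s, matches podStartE2EDuration
        slo := e2e - (lastPull - firstPull) // 2.537563296s, matches podStartSLOduration
        fmt.Printf("e2e=%.9fs slo=%.9fs\n", e2e, slo)
    }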
Jan 30 12:31:11 crc kubenswrapper[4703]: I0130 12:31:11.241358 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dfhth"
Jan 30 12:31:11 crc kubenswrapper[4703]: I0130 12:31:11.306690 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dfhth"
Jan 30 12:31:12 crc kubenswrapper[4703]: I0130 12:31:12.087798 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834"
Jan 30 12:31:12 crc kubenswrapper[4703]: E0130 12:31:12.088165 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:31:14 crc kubenswrapper[4703]: I0130 12:31:14.825484 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dfhth"]
Jan 30 12:31:14 crc kubenswrapper[4703]: I0130 12:31:14.826451 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dfhth" podUID="a7d20c26-413b-4b4d-8c28-bbe10d95b5d2" containerName="registry-server" containerID="cri-o://b885c7f47ae60761b3e58055bb964a8e8785c83268cf4fc1f3be5bda1899d344" gracePeriod=2
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.326190 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dfhth"
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.434387 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-scgw6\" (UniqueName: \"kubernetes.io/projected/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2-kube-api-access-scgw6\") pod \"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2\" (UID: \"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2\") "
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.434657 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2-utilities\") pod \"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2\" (UID: \"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2\") "
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.434941 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2-catalog-content\") pod \"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2\" (UID: \"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2\") "
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.435944 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2-utilities" (OuterVolumeSpecName: "utilities") pod "a7d20c26-413b-4b4d-8c28-bbe10d95b5d2" (UID: "a7d20c26-413b-4b4d-8c28-bbe10d95b5d2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
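Note: the "Killing container with a grace period ... gracePeriod=2" entry above reflects the usual SIGTERM-then-SIGKILL termination pattern: the runtime signals the process and escalates only if it has not exited when the grace period expires. A sketch of that pattern under those assumptions (this is not the kubelet's actual code path):

    package main

    import (
        "fmt"
        "time"
    )

    // stopContainer mimics graceful termination: signal, wait up to the
    // grace period for exit, then escalate. The exited channel stands in
    // for the runtime reporting that the process is gone.
    func stopContainer(id string, grace time.Duration, exited <-chan struct{}) {
        fmt.Printf("sending SIGTERM to %s, grace %s\n", id, grace)
        select {
        case <-exited:
            fmt.Println("container exited within the grace period")
        case <-time.After(grace):
            fmt.Printf("grace period elapsed, sending SIGKILL to %s\n", id)
        }
    }

    func main() {
        exited := make(chan struct{})
        go func() { time.Sleep(500 * time.Millisecond); close(exited) }()
        stopContainer("cri-o://b885c7f47ae6...", 2*time.Second, exited)
    }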
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.446826 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2-kube-api-access-scgw6" (OuterVolumeSpecName: "kube-api-access-scgw6") pod "a7d20c26-413b-4b4d-8c28-bbe10d95b5d2" (UID: "a7d20c26-413b-4b4d-8c28-bbe10d95b5d2"). InnerVolumeSpecName "kube-api-access-scgw6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.498489 4703 generic.go:334] "Generic (PLEG): container finished" podID="a7d20c26-413b-4b4d-8c28-bbe10d95b5d2" containerID="b885c7f47ae60761b3e58055bb964a8e8785c83268cf4fc1f3be5bda1899d344" exitCode=0
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.498551 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dfhth" event={"ID":"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2","Type":"ContainerDied","Data":"b885c7f47ae60761b3e58055bb964a8e8785c83268cf4fc1f3be5bda1899d344"}
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.498567 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dfhth"
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.498582 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dfhth" event={"ID":"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2","Type":"ContainerDied","Data":"9199d2b2f0d490341ae760e0ae6b508f40d2cd6303b5033be734596047906b4f"}
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.498600 4703 scope.go:117] "RemoveContainer" containerID="b885c7f47ae60761b3e58055bb964a8e8785c83268cf4fc1f3be5bda1899d344"
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.535978 4703 scope.go:117] "RemoveContainer" containerID="12b8921b9cc2fb754a2f95ed2a8c33e69cd99cfd5a6629465b7a317857a0889d"
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.537839 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2-utilities\") on node \"crc\" DevicePath \"\""
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.537894 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-scgw6\" (UniqueName: \"kubernetes.io/projected/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2-kube-api-access-scgw6\") on node \"crc\" DevicePath \"\""
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.573162 4703 scope.go:117] "RemoveContainer" containerID="c3dd51d48eb52b4be0c7b986a8908a1d453128f32dae04ab4d9bddd00ef86ee1"
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.595113 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a7d20c26-413b-4b4d-8c28-bbe10d95b5d2" (UID: "a7d20c26-413b-4b4d-8c28-bbe10d95b5d2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.615662 4703 scope.go:117] "RemoveContainer" containerID="b885c7f47ae60761b3e58055bb964a8e8785c83268cf4fc1f3be5bda1899d344"
Jan 30 12:31:15 crc kubenswrapper[4703]: E0130 12:31:15.616442 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b885c7f47ae60761b3e58055bb964a8e8785c83268cf4fc1f3be5bda1899d344\": container with ID starting with b885c7f47ae60761b3e58055bb964a8e8785c83268cf4fc1f3be5bda1899d344 not found: ID does not exist" containerID="b885c7f47ae60761b3e58055bb964a8e8785c83268cf4fc1f3be5bda1899d344"
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.616510 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b885c7f47ae60761b3e58055bb964a8e8785c83268cf4fc1f3be5bda1899d344"} err="failed to get container status \"b885c7f47ae60761b3e58055bb964a8e8785c83268cf4fc1f3be5bda1899d344\": rpc error: code = NotFound desc = could not find container \"b885c7f47ae60761b3e58055bb964a8e8785c83268cf4fc1f3be5bda1899d344\": container with ID starting with b885c7f47ae60761b3e58055bb964a8e8785c83268cf4fc1f3be5bda1899d344 not found: ID does not exist"
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.616548 4703 scope.go:117] "RemoveContainer" containerID="12b8921b9cc2fb754a2f95ed2a8c33e69cd99cfd5a6629465b7a317857a0889d"
Jan 30 12:31:15 crc kubenswrapper[4703]: E0130 12:31:15.617147 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12b8921b9cc2fb754a2f95ed2a8c33e69cd99cfd5a6629465b7a317857a0889d\": container with ID starting with 12b8921b9cc2fb754a2f95ed2a8c33e69cd99cfd5a6629465b7a317857a0889d not found: ID does not exist" containerID="12b8921b9cc2fb754a2f95ed2a8c33e69cd99cfd5a6629465b7a317857a0889d"
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.617181 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12b8921b9cc2fb754a2f95ed2a8c33e69cd99cfd5a6629465b7a317857a0889d"} err="failed to get container status \"12b8921b9cc2fb754a2f95ed2a8c33e69cd99cfd5a6629465b7a317857a0889d\": rpc error: code = NotFound desc = could not find container \"12b8921b9cc2fb754a2f95ed2a8c33e69cd99cfd5a6629465b7a317857a0889d\": container with ID starting with 12b8921b9cc2fb754a2f95ed2a8c33e69cd99cfd5a6629465b7a317857a0889d not found: ID does not exist"
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.617198 4703 scope.go:117] "RemoveContainer" containerID="c3dd51d48eb52b4be0c7b986a8908a1d453128f32dae04ab4d9bddd00ef86ee1"
Jan 30 12:31:15 crc kubenswrapper[4703]: E0130 12:31:15.617741 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c3dd51d48eb52b4be0c7b986a8908a1d453128f32dae04ab4d9bddd00ef86ee1\": container with ID starting with c3dd51d48eb52b4be0c7b986a8908a1d453128f32dae04ab4d9bddd00ef86ee1 not found: ID does not exist" containerID="c3dd51d48eb52b4be0c7b986a8908a1d453128f32dae04ab4d9bddd00ef86ee1"
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.617795 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3dd51d48eb52b4be0c7b986a8908a1d453128f32dae04ab4d9bddd00ef86ee1"} err="failed to get container status \"c3dd51d48eb52b4be0c7b986a8908a1d453128f32dae04ab4d9bddd00ef86ee1\": rpc error: code = NotFound desc = could not find container \"c3dd51d48eb52b4be0c7b986a8908a1d453128f32dae04ab4d9bddd00ef86ee1\": container with ID starting with c3dd51d48eb52b4be0c7b986a8908a1d453128f32dae04ab4d9bddd00ef86ee1 not found: ID does not exist"
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.640690 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.845756 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dfhth"]
Jan 30 12:31:15 crc kubenswrapper[4703]: I0130 12:31:15.856067 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dfhth"]
Jan 30 12:31:17 crc kubenswrapper[4703]: I0130 12:31:17.101166 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7d20c26-413b-4b4d-8c28-bbe10d95b5d2" path="/var/lib/kubelet/pods/a7d20c26-413b-4b4d-8c28-bbe10d95b5d2/volumes"
Jan 30 12:31:23 crc kubenswrapper[4703]: I0130 12:31:23.087234 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834"
Jan 30 12:31:23 crc kubenswrapper[4703]: E0130 12:31:23.088652 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:31:35 crc kubenswrapper[4703]: I0130 12:31:35.099855 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834"
Jan 30 12:31:35 crc kubenswrapper[4703]: E0130 12:31:35.101306 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:31:47 crc kubenswrapper[4703]: I0130 12:31:47.087032 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834"
Jan 30 12:31:47 crc kubenswrapper[4703]: E0130 12:31:47.088304 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:31:49 crc kubenswrapper[4703]: I0130 12:31:49.901022 4703 generic.go:334] "Generic (PLEG): container finished" podID="e0374bdb-2201-41a5-90ff-2185eac3add1" containerID="9ef70cc0c091d0ddab52723883514e48c21f1eec5a57d69b082afc0f840d8c2e" exitCode=0
Jan 30 12:31:49 crc kubenswrapper[4703]: I0130 12:31:49.901114 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn" event={"ID":"e0374bdb-2201-41a5-90ff-2185eac3add1","Type":"ContainerDied","Data":"9ef70cc0c091d0ddab52723883514e48c21f1eec5a57d69b082afc0f840d8c2e"}
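Note: the RemoveContainer / "ContainerStatus from runtime service failed" / "DeleteContainer returned error" triplets above are benign. Deletion is treated as idempotent: a NotFound from the runtime means the container is already gone, so cleanup proceeds. A sketch of that pattern (the runtime interface here is a hypothetical stand-in, not the real CRI client):

    package main

    import (
        "errors"
        "fmt"
    )

    var errNotFound = errors.New("rpc error: code = NotFound")

    type runtime struct{ containers map[string]bool }

    func (r *runtime) remove(id string) error {
        if !r.containers[id] {
            return fmt.Errorf("could not find container %q: %w", id, errNotFound)
        }
        delete(r.containers, id)
        return nil
    }

    func removeContainer(r *runtime, id string) {
        if err := r.remove(id); err != nil && errors.Is(err, errNotFound) {
            // Matches the log: the error is reported, then treated as success.
            fmt.Printf("DeleteContainer returned error (already gone): %v\n", err)
            return
        }
        fmt.Println("container removed")
    }

    func main() {
        r := &runtime{containers: map[string]bool{}}
        removeContainer(r, "b885c7f47ae6...")
    }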
Jan 30 12:31:51 crc kubenswrapper[4703]: I0130 12:31:51.532836 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn"
Jan 30 12:31:51 crc kubenswrapper[4703]: I0130 12:31:51.705351 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7tcl6\" (UniqueName: \"kubernetes.io/projected/e0374bdb-2201-41a5-90ff-2185eac3add1-kube-api-access-7tcl6\") pod \"e0374bdb-2201-41a5-90ff-2185eac3add1\" (UID: \"e0374bdb-2201-41a5-90ff-2185eac3add1\") "
Jan 30 12:31:51 crc kubenswrapper[4703]: I0130 12:31:51.705474 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e0374bdb-2201-41a5-90ff-2185eac3add1-ssh-key-openstack-edpm-ipam\") pod \"e0374bdb-2201-41a5-90ff-2185eac3add1\" (UID: \"e0374bdb-2201-41a5-90ff-2185eac3add1\") "
Jan 30 12:31:51 crc kubenswrapper[4703]: I0130 12:31:51.705857 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0374bdb-2201-41a5-90ff-2185eac3add1-inventory\") pod \"e0374bdb-2201-41a5-90ff-2185eac3add1\" (UID: \"e0374bdb-2201-41a5-90ff-2185eac3add1\") "
Jan 30 12:31:51 crc kubenswrapper[4703]: I0130 12:31:51.722959 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0374bdb-2201-41a5-90ff-2185eac3add1-kube-api-access-7tcl6" (OuterVolumeSpecName: "kube-api-access-7tcl6") pod "e0374bdb-2201-41a5-90ff-2185eac3add1" (UID: "e0374bdb-2201-41a5-90ff-2185eac3add1"). InnerVolumeSpecName "kube-api-access-7tcl6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:31:51 crc kubenswrapper[4703]: I0130 12:31:51.738212 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0374bdb-2201-41a5-90ff-2185eac3add1-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "e0374bdb-2201-41a5-90ff-2185eac3add1" (UID: "e0374bdb-2201-41a5-90ff-2185eac3add1"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:31:51 crc kubenswrapper[4703]: I0130 12:31:51.747425 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0374bdb-2201-41a5-90ff-2185eac3add1-inventory" (OuterVolumeSpecName: "inventory") pod "e0374bdb-2201-41a5-90ff-2185eac3add1" (UID: "e0374bdb-2201-41a5-90ff-2185eac3add1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:31:51 crc kubenswrapper[4703]: I0130 12:31:51.808997 4703 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e0374bdb-2201-41a5-90ff-2185eac3add1-inventory\") on node \"crc\" DevicePath \"\""
Jan 30 12:31:51 crc kubenswrapper[4703]: I0130 12:31:51.809308 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7tcl6\" (UniqueName: \"kubernetes.io/projected/e0374bdb-2201-41a5-90ff-2185eac3add1-kube-api-access-7tcl6\") on node \"crc\" DevicePath \"\""
Jan 30 12:31:51 crc kubenswrapper[4703]: I0130 12:31:51.809448 4703 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e0374bdb-2201-41a5-90ff-2185eac3add1-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 30 12:31:51 crc kubenswrapper[4703]: I0130 12:31:51.937067 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn" event={"ID":"e0374bdb-2201-41a5-90ff-2185eac3add1","Type":"ContainerDied","Data":"15f07c38f630bfbe14bf3b47c57108196be008642abf1c8c4e9ce377ae74d9d0"}
Jan 30 12:31:51 crc kubenswrapper[4703]: I0130 12:31:51.937155 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="15f07c38f630bfbe14bf3b47c57108196be008642abf1c8c4e9ce377ae74d9d0"
Jan 30 12:31:51 crc kubenswrapper[4703]: I0130 12:31:51.937313 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-vmszn"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.037889 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2"]
Jan 30 12:31:52 crc kubenswrapper[4703]: E0130 12:31:52.038879 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7d20c26-413b-4b4d-8c28-bbe10d95b5d2" containerName="extract-utilities"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.038984 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7d20c26-413b-4b4d-8c28-bbe10d95b5d2" containerName="extract-utilities"
Jan 30 12:31:52 crc kubenswrapper[4703]: E0130 12:31:52.039046 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7d20c26-413b-4b4d-8c28-bbe10d95b5d2" containerName="registry-server"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.039100 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7d20c26-413b-4b4d-8c28-bbe10d95b5d2" containerName="registry-server"
Jan 30 12:31:52 crc kubenswrapper[4703]: E0130 12:31:52.039192 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7d20c26-413b-4b4d-8c28-bbe10d95b5d2" containerName="extract-content"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.039278 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7d20c26-413b-4b4d-8c28-bbe10d95b5d2" containerName="extract-content"
Jan 30 12:31:52 crc kubenswrapper[4703]: E0130 12:31:52.039400 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0374bdb-2201-41a5-90ff-2185eac3add1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.039464 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0374bdb-2201-41a5-90ff-2185eac3add1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.039778 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0374bdb-2201-41a5-90ff-2185eac3add1" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
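Note: the cpu_manager/memory_manager "RemoveStaleState" entries above show per-container resource state being dropped once the owning pod is no longer in the active set. A sketch of that cleanup pass, with hypothetical data structures rather than the managers' real state stores:

    package main

    import "fmt"

    type key struct{ podUID, container string }

    // removeStaleState drops assignments whose pod no longer exists,
    // mirroring "RemoveStaleState: removing container" followed by
    // "Deleted CPUSet assignment" in the log.
    func removeStaleState(assignments map[key]string, activePods map[string]bool) {
        for k := range assignments {
            if !activePods[k.podUID] {
                fmt.Printf("RemoveStaleState: removing container %s/%s\n", k.podUID, k.container)
                delete(assignments, k)
            }
        }
    }

    func main() {
        assignments := map[key]string{
            {"a7d20c26-413b-4b4d-8c28-bbe10d95b5d2", "registry-server"}: "0-3",
        }
        removeStaleState(assignments, map[string]bool{})
        fmt.Println("remaining assignments:", len(assignments))
    }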
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.039870 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7d20c26-413b-4b4d-8c28-bbe10d95b5d2" containerName="registry-server"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.041088 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.047726 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.047722 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.047866 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-jjdl5"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.052007 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.052225 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2"]
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.219729 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q865d\" (UniqueName: \"kubernetes.io/projected/2a36d42d-77b8-4ba1-8253-909a9bfb0c4a-kube-api-access-q865d\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2\" (UID: \"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.219887 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a36d42d-77b8-4ba1-8253-909a9bfb0c4a-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2\" (UID: \"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.220207 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2a36d42d-77b8-4ba1-8253-909a9bfb0c4a-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2\" (UID: \"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.323329 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q865d\" (UniqueName: \"kubernetes.io/projected/2a36d42d-77b8-4ba1-8253-909a9bfb0c4a-kube-api-access-q865d\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2\" (UID: \"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.323903 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a36d42d-77b8-4ba1-8253-909a9bfb0c4a-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2\" (UID: \"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.324088 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2a36d42d-77b8-4ba1-8253-909a9bfb0c4a-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2\" (UID: \"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.333602 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a36d42d-77b8-4ba1-8253-909a9bfb0c4a-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2\" (UID: \"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.333653 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2a36d42d-77b8-4ba1-8253-909a9bfb0c4a-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2\" (UID: \"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.344538 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q865d\" (UniqueName: \"kubernetes.io/projected/2a36d42d-77b8-4ba1-8253-909a9bfb0c4a-kube-api-access-q865d\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2\" (UID: \"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.363363 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2"
Jan 30 12:31:52 crc kubenswrapper[4703]: I0130 12:31:52.835095 4703 scope.go:117] "RemoveContainer" containerID="015a87566bb4a87ce6fd94f2a64e317d0a52bcdbe92e15a06a75e4ce2b5ac316"
Jan 30 12:31:53 crc kubenswrapper[4703]: I0130 12:31:53.041422 4703 scope.go:117] "RemoveContainer" containerID="9da643b5dca57ef27b2b62dc081db040f72572014de6521b390288855be18261"
Jan 30 12:31:53 crc kubenswrapper[4703]: I0130 12:31:53.127073 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2"]
Jan 30 12:31:53 crc kubenswrapper[4703]: W0130 12:31:53.130923 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a36d42d_77b8_4ba1_8253_909a9bfb0c4a.slice/crio-7a3135d76c7800897c20f15f69aaaf4714dcce2253634fa2e15158e1c9bdc8ac WatchSource:0}: Error finding container 7a3135d76c7800897c20f15f69aaaf4714dcce2253634fa2e15158e1c9bdc8ac: Status 404 returned error can't find the container with id 7a3135d76c7800897c20f15f69aaaf4714dcce2253634fa2e15158e1c9bdc8ac
Jan 30 12:31:53 crc kubenswrapper[4703]: I0130 12:31:53.134040 4703 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 30 12:31:54 crc kubenswrapper[4703]: I0130 12:31:54.052837 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2" event={"ID":"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a","Type":"ContainerStarted","Data":"7a3135d76c7800897c20f15f69aaaf4714dcce2253634fa2e15158e1c9bdc8ac"}
Jan 30 12:31:55 crc kubenswrapper[4703]: I0130 12:31:55.069225 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2" event={"ID":"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a","Type":"ContainerStarted","Data":"24f5f33fb0a688f85c7177fc3edc40474772438eb5d3ccdb2fbb747db84e3b71"}
Jan 30 12:31:55 crc kubenswrapper[4703]: I0130 12:31:55.101883 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2" podStartSLOduration=2.412108474 podStartE2EDuration="3.101863117s" podCreationTimestamp="2026-01-30 12:31:52 +0000 UTC" firstStartedPulling="2026-01-30 12:31:53.133757865 +0000 UTC m=+2148.911579519" lastFinishedPulling="2026-01-30 12:31:53.823512508 +0000 UTC m=+2149.601334162" observedRunningTime="2026-01-30 12:31:55.091268005 +0000 UTC m=+2150.869089659" watchObservedRunningTime="2026-01-30 12:31:55.101863117 +0000 UTC m=+2150.879684771"
Jan 30 12:32:00 crc kubenswrapper[4703]: I0130 12:32:00.086725 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834"
Jan 30 12:32:00 crc kubenswrapper[4703]: E0130 12:32:00.087996 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:32:05 crc kubenswrapper[4703]: I0130 12:32:05.061066 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-2jd7h"]
Jan 30 12:32:05 crc kubenswrapper[4703]: I0130 12:32:05.074292 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-hczbb"]
Jan 30 12:32:05 crc kubenswrapper[4703]: I0130 12:32:05.510574 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-x8fjs"]
Jan 30 12:32:05 crc kubenswrapper[4703]: I0130 12:32:05.511059 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-2jd7h"]
Jan 30 12:32:05 crc kubenswrapper[4703]: I0130 12:32:05.513757 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-hczbb"]
Jan 30 12:32:05 crc kubenswrapper[4703]: I0130 12:32:05.523181 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-x8fjs"]
Jan 30 12:32:06 crc kubenswrapper[4703]: I0130 12:32:06.046543 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-7fec-account-create-update-zft2k"]
Jan 30 12:32:06 crc kubenswrapper[4703]: I0130 12:32:06.056335 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-7fec-account-create-update-zft2k"]
Jan 30 12:32:07 crc kubenswrapper[4703]: I0130 12:32:07.066429 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-7935-account-create-update-tpmdh"]
Jan 30 12:32:07 crc kubenswrapper[4703]: I0130 12:32:07.075637 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-7e42-account-create-update-skmfk"]
Jan 30 12:32:07 crc kubenswrapper[4703]: I0130 12:32:07.101084 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d299026-7d0b-43f0-8f66-269e434576a9" path="/var/lib/kubelet/pods/2d299026-7d0b-43f0-8f66-269e434576a9/volumes"
Jan 30 12:32:07 crc kubenswrapper[4703]: I0130 12:32:07.101929 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f2ddfec-e63d-4fbe-9c90-f9131bf80969" path="/var/lib/kubelet/pods/2f2ddfec-e63d-4fbe-9c90-f9131bf80969/volumes"
Jan 30 12:32:07 crc kubenswrapper[4703]: I0130 12:32:07.102606 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82de3756-5f83-4cc6-9afd-8b359a2561f7" path="/var/lib/kubelet/pods/82de3756-5f83-4cc6-9afd-8b359a2561f7/volumes"
Jan 30 12:32:07 crc kubenswrapper[4703]: I0130 12:32:07.103548 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1" path="/var/lib/kubelet/pods/8cd8ce2a-49a2-4b71-b8b7-15ef5cd68ee1/volumes"
Jan 30 12:32:07 crc kubenswrapper[4703]: I0130 12:32:07.104946 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-7e42-account-create-update-skmfk"]
Jan 30 12:32:07 crc kubenswrapper[4703]: I0130 12:32:07.104992 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-7935-account-create-update-tpmdh"]
Jan 30 12:32:09 crc kubenswrapper[4703]: I0130 12:32:09.100979 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7998bb0-fbe6-46ad-bf20-227ac24143c8" path="/var/lib/kubelet/pods/a7998bb0-fbe6-46ad-bf20-227ac24143c8/volumes"
Jan 30 12:32:09 crc kubenswrapper[4703]: I0130 12:32:09.102389 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc0160f9-a98b-4839-b10d-a23168a56417" path="/var/lib/kubelet/pods/bc0160f9-a98b-4839-b10d-a23168a56417/volumes"
Jan 30 12:32:12 crc kubenswrapper[4703]: I0130 12:32:12.087812 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834"
Jan 30 12:32:12 crc kubenswrapper[4703]: E0130 12:32:12.089061 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
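Note: the recurring "back-off 5m0s" entries show the restart backoff for the crash-looping nova-scheduler-scheduler container sitting at its ceiling. A sketch of exponential restart backoff with a 5m cap; the 10s base and doubling factor are assumptions for illustration, only the 5m cap is visible in this log:

    package main

    import (
        "fmt"
        "time"
    )

    // backoff returns the wait before the next restart attempt,
    // doubling from an assumed 10s base up to a 5m ceiling.
    func backoff(restarts int) time.Duration {
        d := 10 * time.Second
        for i := 0; i < restarts; i++ {
            d *= 2
            if d >= 5*time.Minute {
                return 5 * time.Minute
            }
        }
        return d
    }

    func main() {
        for r := 0; r <= 6; r++ {
            fmt.Printf("restart %d -> wait %s\n", r, backoff(r))
        }
    }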
Jan 30 12:32:12 crc kubenswrapper[4703]: I0130 12:32:12.823113 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 30 12:32:12 crc kubenswrapper[4703]: I0130 12:32:12.824012 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 30 12:32:23 crc kubenswrapper[4703]: I0130 12:32:23.103099 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834"
Jan 30 12:32:23 crc kubenswrapper[4703]: E0130 12:32:23.104314 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:32:35 crc kubenswrapper[4703]: I0130 12:32:35.093551 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834"
Jan 30 12:32:35 crc kubenswrapper[4703]: E0130 12:32:35.094716 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:32:42 crc kubenswrapper[4703]: I0130 12:32:42.823113 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 30 12:32:42 crc kubenswrapper[4703]: I0130 12:32:42.824070 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 30 12:32:49 crc kubenswrapper[4703]: I0130 12:32:49.087243 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834"
Jan 30 12:32:49 crc kubenswrapper[4703]: E0130 12:32:49.088333 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
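Note: the machine-config-daemon liveness failures above report "connect: connection refused" against 127.0.0.1:8798, which means nothing is listening on the port at probe time (as opposed to a slow or erroring handler). A sketch of an HTTP liveness check in that shape, not the kubelet's actual prober code:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // probe performs a GET with a short timeout and treats any transport
    // error or non-2xx/3xx status as failure, like an HTTP liveness probe.
    func probe(url string) error {
        client := &http.Client{Timeout: time.Second}
        resp, err := client.Get(url)
        if err != nil {
            return err // e.g. "dial tcp 127.0.0.1:8798: connect: connection refused"
        }
        defer resp.Body.Close()
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return fmt.Errorf("unexpected status %d", resp.StatusCode)
        }
        return nil
    }

    func main() {
        if err := probe("http://127.0.0.1:8798/health"); err != nil {
            fmt.Println("Probe failed:", err)
        }
    }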
Jan 30 12:32:52 crc kubenswrapper[4703]: I0130 12:32:52.840901 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-gmsjh"]
Jan 30 12:32:52 crc kubenswrapper[4703]: I0130 12:32:52.844534 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gmsjh"
Jan 30 12:32:52 crc kubenswrapper[4703]: I0130 12:32:52.859590 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gmsjh"]
Jan 30 12:32:53 crc kubenswrapper[4703]: I0130 12:32:53.027785 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dzhn\" (UniqueName: \"kubernetes.io/projected/b016657c-ac98-415e-ab7d-c87bcd80e0d6-kube-api-access-7dzhn\") pod \"certified-operators-gmsjh\" (UID: \"b016657c-ac98-415e-ab7d-c87bcd80e0d6\") " pod="openshift-marketplace/certified-operators-gmsjh"
Jan 30 12:32:53 crc kubenswrapper[4703]: I0130 12:32:53.028575 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b016657c-ac98-415e-ab7d-c87bcd80e0d6-catalog-content\") pod \"certified-operators-gmsjh\" (UID: \"b016657c-ac98-415e-ab7d-c87bcd80e0d6\") " pod="openshift-marketplace/certified-operators-gmsjh"
Jan 30 12:32:53 crc kubenswrapper[4703]: I0130 12:32:53.028661 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b016657c-ac98-415e-ab7d-c87bcd80e0d6-utilities\") pod \"certified-operators-gmsjh\" (UID: \"b016657c-ac98-415e-ab7d-c87bcd80e0d6\") " pod="openshift-marketplace/certified-operators-gmsjh"
Jan 30 12:32:53 crc kubenswrapper[4703]: I0130 12:32:53.131523 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dzhn\" (UniqueName: \"kubernetes.io/projected/b016657c-ac98-415e-ab7d-c87bcd80e0d6-kube-api-access-7dzhn\") pod \"certified-operators-gmsjh\" (UID: \"b016657c-ac98-415e-ab7d-c87bcd80e0d6\") " pod="openshift-marketplace/certified-operators-gmsjh"
Jan 30 12:32:53 crc kubenswrapper[4703]: I0130 12:32:53.131842 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b016657c-ac98-415e-ab7d-c87bcd80e0d6-catalog-content\") pod \"certified-operators-gmsjh\" (UID: \"b016657c-ac98-415e-ab7d-c87bcd80e0d6\") " pod="openshift-marketplace/certified-operators-gmsjh"
Jan 30 12:32:53 crc kubenswrapper[4703]: I0130 12:32:53.131886 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b016657c-ac98-415e-ab7d-c87bcd80e0d6-utilities\") pod \"certified-operators-gmsjh\" (UID: \"b016657c-ac98-415e-ab7d-c87bcd80e0d6\") " pod="openshift-marketplace/certified-operators-gmsjh"
Jan 30 12:32:53 crc kubenswrapper[4703]: I0130 12:32:53.132951 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b016657c-ac98-415e-ab7d-c87bcd80e0d6-utilities\") pod \"certified-operators-gmsjh\" (UID: \"b016657c-ac98-415e-ab7d-c87bcd80e0d6\") " pod="openshift-marketplace/certified-operators-gmsjh"
Jan 30 12:32:53 crc kubenswrapper[4703]: I0130 12:32:53.133002 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b016657c-ac98-415e-ab7d-c87bcd80e0d6-catalog-content\") pod \"certified-operators-gmsjh\" (UID: \"b016657c-ac98-415e-ab7d-c87bcd80e0d6\") " pod="openshift-marketplace/certified-operators-gmsjh"
Jan 30 12:32:53 crc kubenswrapper[4703]: I0130 12:32:53.158879 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dzhn\" (UniqueName: \"kubernetes.io/projected/b016657c-ac98-415e-ab7d-c87bcd80e0d6-kube-api-access-7dzhn\") pod \"certified-operators-gmsjh\" (UID: \"b016657c-ac98-415e-ab7d-c87bcd80e0d6\") " pod="openshift-marketplace/certified-operators-gmsjh"
Jan 30 12:32:53 crc kubenswrapper[4703]: I0130 12:32:53.179384 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gmsjh"
Jan 30 12:32:53 crc kubenswrapper[4703]: I0130 12:32:53.204136 4703 scope.go:117] "RemoveContainer" containerID="96e429b7f97933e42d40124d44a156d4ef71adcdbdb6d58507bf462568b32612"
Jan 30 12:32:53 crc kubenswrapper[4703]: I0130 12:32:53.399452 4703 scope.go:117] "RemoveContainer" containerID="53fd485e55a85bd02e29df0b85806adee9e53ccee5ced2686a8b1a1fcda98ca4"
Jan 30 12:32:53 crc kubenswrapper[4703]: I0130 12:32:53.447513 4703 scope.go:117] "RemoveContainer" containerID="02935b86d9ba3a50b03c82fd1c55ca5b73c248933f84c13fdff687d92d515897"
Jan 30 12:32:53 crc kubenswrapper[4703]: I0130 12:32:53.536093 4703 scope.go:117] "RemoveContainer" containerID="9f2257b842d2944119879d7107a1b3214c625fcbd1cd347ee2de9edf964d0e03"
Jan 30 12:32:53 crc kubenswrapper[4703]: I0130 12:32:53.606357 4703 scope.go:117] "RemoveContainer" containerID="e31f986a4561d64e76a8d23b28a7783265cc56369ea559cc6810855ad4df2c58"
Jan 30 12:32:53 crc kubenswrapper[4703]: I0130 12:32:53.668878 4703 scope.go:117] "RemoveContainer" containerID="88757d7a644a38eed98801b8b6c43b72f90255da67c001442dfa1af21e126496"
Jan 30 12:32:53 crc kubenswrapper[4703]: I0130 12:32:53.860941 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gmsjh"]
Jan 30 12:32:53 crc kubenswrapper[4703]: W0130 12:32:53.868410 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb016657c_ac98_415e_ab7d_c87bcd80e0d6.slice/crio-eaa7bb7350dbff55776ed21c5623352f3a2ce6a4bc65f2910257d322091f87a8 WatchSource:0}: Error finding container eaa7bb7350dbff55776ed21c5623352f3a2ce6a4bc65f2910257d322091f87a8: Status 404 returned error can't find the container with id eaa7bb7350dbff55776ed21c5623352f3a2ce6a4bc65f2910257d322091f87a8
Jan 30 12:32:54 crc kubenswrapper[4703]: I0130 12:32:54.357248 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gmsjh" event={"ID":"b016657c-ac98-415e-ab7d-c87bcd80e0d6","Type":"ContainerStarted","Data":"eaa7bb7350dbff55776ed21c5623352f3a2ce6a4bc65f2910257d322091f87a8"}
Jan 30 12:32:55 crc kubenswrapper[4703]: I0130 12:32:55.371444 4703 generic.go:334] "Generic (PLEG): container finished" podID="b016657c-ac98-415e-ab7d-c87bcd80e0d6" containerID="cc939d7775c5c1be605f746ec952b81df41aa2ae9466148a8ddc7a58bf1bf3a1" exitCode=0
Jan 30 12:32:55 crc kubenswrapper[4703]: I0130 12:32:55.371516 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gmsjh" event={"ID":"b016657c-ac98-415e-ab7d-c87bcd80e0d6","Type":"ContainerDied","Data":"cc939d7775c5c1be605f746ec952b81df41aa2ae9466148a8ddc7a58bf1bf3a1"}
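Note: the "Generic (PLEG): container finished" / "SyncLoop (PLEG): event for pod" pairs above come from the pod lifecycle event generator, which periodically relists container states and turns state changes into events for the sync loop. A sketch of that diffing idea, with simplified types rather than the kubelet's real PLEG:

    package main

    import "fmt"

    type event struct{ typ, id string }

    // relist compares a previous and a current snapshot of container
    // states and emits lifecycle events for the differences.
    func relist(prev, cur map[string]string) []event {
        var out []event
        for id, s := range cur {
            switch {
            case prev[id] == "" && s == "running":
                out = append(out, event{"ContainerStarted", id})
            case prev[id] == "running" && s == "exited":
                out = append(out, event{"ContainerDied", id})
            }
        }
        return out
    }

    func main() {
        prev := map[string]string{"cc939d77": "running"}
        cur := map[string]string{"cc939d77": "exited", "c49655a1": "running"}
        for _, e := range relist(prev, cur) {
            fmt.Printf("SyncLoop (PLEG): %s for %s\n", e.typ, e.id)
        }
    }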
event={"ID":"b016657c-ac98-415e-ab7d-c87bcd80e0d6","Type":"ContainerDied","Data":"cc939d7775c5c1be605f746ec952b81df41aa2ae9466148a8ddc7a58bf1bf3a1"} Jan 30 12:32:56 crc kubenswrapper[4703]: I0130 12:32:56.388236 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gmsjh" event={"ID":"b016657c-ac98-415e-ab7d-c87bcd80e0d6","Type":"ContainerStarted","Data":"c49655a12df60a5b5f4d139f48b55c00551d9f537e7704fa13f987c2bff73dfc"} Jan 30 12:32:57 crc kubenswrapper[4703]: I0130 12:32:57.399688 4703 generic.go:334] "Generic (PLEG): container finished" podID="b016657c-ac98-415e-ab7d-c87bcd80e0d6" containerID="c49655a12df60a5b5f4d139f48b55c00551d9f537e7704fa13f987c2bff73dfc" exitCode=0 Jan 30 12:32:57 crc kubenswrapper[4703]: I0130 12:32:57.399797 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gmsjh" event={"ID":"b016657c-ac98-415e-ab7d-c87bcd80e0d6","Type":"ContainerDied","Data":"c49655a12df60a5b5f4d139f48b55c00551d9f537e7704fa13f987c2bff73dfc"} Jan 30 12:32:58 crc kubenswrapper[4703]: I0130 12:32:58.584411 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gmsjh" event={"ID":"b016657c-ac98-415e-ab7d-c87bcd80e0d6","Type":"ContainerStarted","Data":"93e07ae0186ef698454f2f1a83611a22e7b85e9a5fd67d5148db858ec77827a9"} Jan 30 12:32:58 crc kubenswrapper[4703]: I0130 12:32:58.620955 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-gmsjh" podStartSLOduration=4.13236032 podStartE2EDuration="6.620926062s" podCreationTimestamp="2026-01-30 12:32:52 +0000 UTC" firstStartedPulling="2026-01-30 12:32:55.375341614 +0000 UTC m=+2211.153163268" lastFinishedPulling="2026-01-30 12:32:57.863907356 +0000 UTC m=+2213.641729010" observedRunningTime="2026-01-30 12:32:58.611869101 +0000 UTC m=+2214.389690765" watchObservedRunningTime="2026-01-30 12:32:58.620926062 +0000 UTC m=+2214.398747716" Jan 30 12:33:01 crc kubenswrapper[4703]: I0130 12:33:01.086665 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834" Jan 30 12:33:01 crc kubenswrapper[4703]: E0130 12:33:01.087473 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:33:03 crc kubenswrapper[4703]: I0130 12:33:03.181066 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gmsjh" Jan 30 12:33:03 crc kubenswrapper[4703]: I0130 12:33:03.182715 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gmsjh" Jan 30 12:33:03 crc kubenswrapper[4703]: I0130 12:33:03.240753 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-gmsjh" Jan 30 12:33:03 crc kubenswrapper[4703]: I0130 12:33:03.799874 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gmsjh" Jan 30 12:33:03 crc kubenswrapper[4703]: I0130 12:33:03.857339 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gmsjh"] Jan 
Jan 30 12:33:05 crc kubenswrapper[4703]: I0130 12:33:05.770956 4703 generic.go:334] "Generic (PLEG): container finished" podID="2a36d42d-77b8-4ba1-8253-909a9bfb0c4a" containerID="24f5f33fb0a688f85c7177fc3edc40474772438eb5d3ccdb2fbb747db84e3b71" exitCode=0
Jan 30 12:33:05 crc kubenswrapper[4703]: I0130 12:33:05.771047 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2" event={"ID":"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a","Type":"ContainerDied","Data":"24f5f33fb0a688f85c7177fc3edc40474772438eb5d3ccdb2fbb747db84e3b71"}
Jan 30 12:33:05 crc kubenswrapper[4703]: I0130 12:33:05.771807 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-gmsjh" podUID="b016657c-ac98-415e-ab7d-c87bcd80e0d6" containerName="registry-server" containerID="cri-o://93e07ae0186ef698454f2f1a83611a22e7b85e9a5fd67d5148db858ec77827a9" gracePeriod=2
Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.448175 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gmsjh"
Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.567331 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b016657c-ac98-415e-ab7d-c87bcd80e0d6-catalog-content\") pod \"b016657c-ac98-415e-ab7d-c87bcd80e0d6\" (UID: \"b016657c-ac98-415e-ab7d-c87bcd80e0d6\") "
Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.568049 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b016657c-ac98-415e-ab7d-c87bcd80e0d6-utilities\") pod \"b016657c-ac98-415e-ab7d-c87bcd80e0d6\" (UID: \"b016657c-ac98-415e-ab7d-c87bcd80e0d6\") "
Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.568333 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7dzhn\" (UniqueName: \"kubernetes.io/projected/b016657c-ac98-415e-ab7d-c87bcd80e0d6-kube-api-access-7dzhn\") pod \"b016657c-ac98-415e-ab7d-c87bcd80e0d6\" (UID: \"b016657c-ac98-415e-ab7d-c87bcd80e0d6\") "
Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.568743 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b016657c-ac98-415e-ab7d-c87bcd80e0d6-utilities" (OuterVolumeSpecName: "utilities") pod "b016657c-ac98-415e-ab7d-c87bcd80e0d6" (UID: "b016657c-ac98-415e-ab7d-c87bcd80e0d6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.570368 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b016657c-ac98-415e-ab7d-c87bcd80e0d6-utilities\") on node \"crc\" DevicePath \"\""
Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.579407 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b016657c-ac98-415e-ab7d-c87bcd80e0d6-kube-api-access-7dzhn" (OuterVolumeSpecName: "kube-api-access-7dzhn") pod "b016657c-ac98-415e-ab7d-c87bcd80e0d6" (UID: "b016657c-ac98-415e-ab7d-c87bcd80e0d6"). InnerVolumeSpecName "kube-api-access-7dzhn". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.673669 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7dzhn\" (UniqueName: \"kubernetes.io/projected/b016657c-ac98-415e-ab7d-c87bcd80e0d6-kube-api-access-7dzhn\") on node \"crc\" DevicePath \"\"" Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.775878 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b016657c-ac98-415e-ab7d-c87bcd80e0d6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b016657c-ac98-415e-ab7d-c87bcd80e0d6" (UID: "b016657c-ac98-415e-ab7d-c87bcd80e0d6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.779300 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b016657c-ac98-415e-ab7d-c87bcd80e0d6-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.789225 4703 generic.go:334] "Generic (PLEG): container finished" podID="b016657c-ac98-415e-ab7d-c87bcd80e0d6" containerID="93e07ae0186ef698454f2f1a83611a22e7b85e9a5fd67d5148db858ec77827a9" exitCode=0 Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.789304 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gmsjh" event={"ID":"b016657c-ac98-415e-ab7d-c87bcd80e0d6","Type":"ContainerDied","Data":"93e07ae0186ef698454f2f1a83611a22e7b85e9a5fd67d5148db858ec77827a9"} Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.789367 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gmsjh" Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.789397 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gmsjh" event={"ID":"b016657c-ac98-415e-ab7d-c87bcd80e0d6","Type":"ContainerDied","Data":"eaa7bb7350dbff55776ed21c5623352f3a2ce6a4bc65f2910257d322091f87a8"} Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.789423 4703 scope.go:117] "RemoveContainer" containerID="93e07ae0186ef698454f2f1a83611a22e7b85e9a5fd67d5148db858ec77827a9" Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.835994 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gmsjh"] Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.849831 4703 scope.go:117] "RemoveContainer" containerID="c49655a12df60a5b5f4d139f48b55c00551d9f537e7704fa13f987c2bff73dfc" Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.852080 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-gmsjh"] Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.916982 4703 scope.go:117] "RemoveContainer" containerID="cc939d7775c5c1be605f746ec952b81df41aa2ae9466148a8ddc7a58bf1bf3a1" Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.974221 4703 scope.go:117] "RemoveContainer" containerID="93e07ae0186ef698454f2f1a83611a22e7b85e9a5fd67d5148db858ec77827a9" Jan 30 12:33:06 crc kubenswrapper[4703]: E0130 12:33:06.979401 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"93e07ae0186ef698454f2f1a83611a22e7b85e9a5fd67d5148db858ec77827a9\": container with ID starting with 
93e07ae0186ef698454f2f1a83611a22e7b85e9a5fd67d5148db858ec77827a9 not found: ID does not exist" containerID="93e07ae0186ef698454f2f1a83611a22e7b85e9a5fd67d5148db858ec77827a9" Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.979644 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"93e07ae0186ef698454f2f1a83611a22e7b85e9a5fd67d5148db858ec77827a9"} err="failed to get container status \"93e07ae0186ef698454f2f1a83611a22e7b85e9a5fd67d5148db858ec77827a9\": rpc error: code = NotFound desc = could not find container \"93e07ae0186ef698454f2f1a83611a22e7b85e9a5fd67d5148db858ec77827a9\": container with ID starting with 93e07ae0186ef698454f2f1a83611a22e7b85e9a5fd67d5148db858ec77827a9 not found: ID does not exist" Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.979689 4703 scope.go:117] "RemoveContainer" containerID="c49655a12df60a5b5f4d139f48b55c00551d9f537e7704fa13f987c2bff73dfc" Jan 30 12:33:06 crc kubenswrapper[4703]: E0130 12:33:06.980252 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c49655a12df60a5b5f4d139f48b55c00551d9f537e7704fa13f987c2bff73dfc\": container with ID starting with c49655a12df60a5b5f4d139f48b55c00551d9f537e7704fa13f987c2bff73dfc not found: ID does not exist" containerID="c49655a12df60a5b5f4d139f48b55c00551d9f537e7704fa13f987c2bff73dfc" Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.980298 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c49655a12df60a5b5f4d139f48b55c00551d9f537e7704fa13f987c2bff73dfc"} err="failed to get container status \"c49655a12df60a5b5f4d139f48b55c00551d9f537e7704fa13f987c2bff73dfc\": rpc error: code = NotFound desc = could not find container \"c49655a12df60a5b5f4d139f48b55c00551d9f537e7704fa13f987c2bff73dfc\": container with ID starting with c49655a12df60a5b5f4d139f48b55c00551d9f537e7704fa13f987c2bff73dfc not found: ID does not exist" Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.980339 4703 scope.go:117] "RemoveContainer" containerID="cc939d7775c5c1be605f746ec952b81df41aa2ae9466148a8ddc7a58bf1bf3a1" Jan 30 12:33:06 crc kubenswrapper[4703]: E0130 12:33:06.980998 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc939d7775c5c1be605f746ec952b81df41aa2ae9466148a8ddc7a58bf1bf3a1\": container with ID starting with cc939d7775c5c1be605f746ec952b81df41aa2ae9466148a8ddc7a58bf1bf3a1 not found: ID does not exist" containerID="cc939d7775c5c1be605f746ec952b81df41aa2ae9466148a8ddc7a58bf1bf3a1" Jan 30 12:33:06 crc kubenswrapper[4703]: I0130 12:33:06.981030 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc939d7775c5c1be605f746ec952b81df41aa2ae9466148a8ddc7a58bf1bf3a1"} err="failed to get container status \"cc939d7775c5c1be605f746ec952b81df41aa2ae9466148a8ddc7a58bf1bf3a1\": rpc error: code = NotFound desc = could not find container \"cc939d7775c5c1be605f746ec952b81df41aa2ae9466148a8ddc7a58bf1bf3a1\": container with ID starting with cc939d7775c5c1be605f746ec952b81df41aa2ae9466148a8ddc7a58bf1bf3a1 not found: ID does not exist" Jan 30 12:33:07 crc kubenswrapper[4703]: I0130 12:33:07.137678 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b016657c-ac98-415e-ab7d-c87bcd80e0d6" path="/var/lib/kubelet/pods/b016657c-ac98-415e-ab7d-c87bcd80e0d6/volumes" Jan 30 12:33:07 crc kubenswrapper[4703]: I0130 12:33:07.299170 
4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2"
Jan 30 12:33:07 crc kubenswrapper[4703]: I0130 12:33:07.406732 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q865d\" (UniqueName: \"kubernetes.io/projected/2a36d42d-77b8-4ba1-8253-909a9bfb0c4a-kube-api-access-q865d\") pod \"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a\" (UID: \"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a\") "
Jan 30 12:33:07 crc kubenswrapper[4703]: I0130 12:33:07.407316 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a36d42d-77b8-4ba1-8253-909a9bfb0c4a-inventory\") pod \"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a\" (UID: \"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a\") "
Jan 30 12:33:07 crc kubenswrapper[4703]: I0130 12:33:07.407459 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2a36d42d-77b8-4ba1-8253-909a9bfb0c4a-ssh-key-openstack-edpm-ipam\") pod \"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a\" (UID: \"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a\") "
Jan 30 12:33:07 crc kubenswrapper[4703]: I0130 12:33:07.420645 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a36d42d-77b8-4ba1-8253-909a9bfb0c4a-kube-api-access-q865d" (OuterVolumeSpecName: "kube-api-access-q865d") pod "2a36d42d-77b8-4ba1-8253-909a9bfb0c4a" (UID: "2a36d42d-77b8-4ba1-8253-909a9bfb0c4a"). InnerVolumeSpecName "kube-api-access-q865d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:33:07 crc kubenswrapper[4703]: I0130 12:33:07.441655 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a36d42d-77b8-4ba1-8253-909a9bfb0c4a-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "2a36d42d-77b8-4ba1-8253-909a9bfb0c4a" (UID: "2a36d42d-77b8-4ba1-8253-909a9bfb0c4a"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:33:07 crc kubenswrapper[4703]: I0130 12:33:07.445931 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a36d42d-77b8-4ba1-8253-909a9bfb0c4a-inventory" (OuterVolumeSpecName: "inventory") pod "2a36d42d-77b8-4ba1-8253-909a9bfb0c4a" (UID: "2a36d42d-77b8-4ba1-8253-909a9bfb0c4a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:33:07 crc kubenswrapper[4703]: I0130 12:33:07.510341 4703 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a36d42d-77b8-4ba1-8253-909a9bfb0c4a-inventory\") on node \"crc\" DevicePath \"\""
Jan 30 12:33:07 crc kubenswrapper[4703]: I0130 12:33:07.510391 4703 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2a36d42d-77b8-4ba1-8253-909a9bfb0c4a-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 30 12:33:07 crc kubenswrapper[4703]: I0130 12:33:07.510412 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q865d\" (UniqueName: \"kubernetes.io/projected/2a36d42d-77b8-4ba1-8253-909a9bfb0c4a-kube-api-access-q865d\") on node \"crc\" DevicePath \"\""
Jan 30 12:33:07 crc kubenswrapper[4703]: I0130 12:33:07.803467 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2" event={"ID":"2a36d42d-77b8-4ba1-8253-909a9bfb0c4a","Type":"ContainerDied","Data":"7a3135d76c7800897c20f15f69aaaf4714dcce2253634fa2e15158e1c9bdc8ac"}
Jan 30 12:33:07 crc kubenswrapper[4703]: I0130 12:33:07.803546 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7a3135d76c7800897c20f15f69aaaf4714dcce2253634fa2e15158e1c9bdc8ac"
Jan 30 12:33:07 crc kubenswrapper[4703]: I0130 12:33:07.803569 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.022234 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp"]
Jan 30 12:33:08 crc kubenswrapper[4703]: E0130 12:33:08.022776 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b016657c-ac98-415e-ab7d-c87bcd80e0d6" containerName="extract-content"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.022802 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="b016657c-ac98-415e-ab7d-c87bcd80e0d6" containerName="extract-content"
Jan 30 12:33:08 crc kubenswrapper[4703]: E0130 12:33:08.022835 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a36d42d-77b8-4ba1-8253-909a9bfb0c4a" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.022846 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a36d42d-77b8-4ba1-8253-909a9bfb0c4a" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:33:08 crc kubenswrapper[4703]: E0130 12:33:08.022876 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b016657c-ac98-415e-ab7d-c87bcd80e0d6" containerName="extract-utilities"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.022886 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="b016657c-ac98-415e-ab7d-c87bcd80e0d6" containerName="extract-utilities"
Jan 30 12:33:08 crc kubenswrapper[4703]: E0130 12:33:08.022905 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b016657c-ac98-415e-ab7d-c87bcd80e0d6" containerName="registry-server"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.022914 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="b016657c-ac98-415e-ab7d-c87bcd80e0d6" containerName="registry-server"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.023460 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="b016657c-ac98-415e-ab7d-c87bcd80e0d6" containerName="registry-server"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.023499 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a36d42d-77b8-4ba1-8253-909a9bfb0c4a" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.024635 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.028463 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.029021 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.029446 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-jjdl5"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.035815 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp"]
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.074274 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.226227 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07c6833e-dfc2-43df-812b-639533947bcb-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-glhhp\" (UID: \"07c6833e-dfc2-43df-812b-639533947bcb\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.227815 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/07c6833e-dfc2-43df-812b-639533947bcb-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-glhhp\" (UID: \"07c6833e-dfc2-43df-812b-639533947bcb\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.227957 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwtc5\" (UniqueName: \"kubernetes.io/projected/07c6833e-dfc2-43df-812b-639533947bcb-kube-api-access-bwtc5\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-glhhp\" (UID: \"07c6833e-dfc2-43df-812b-639533947bcb\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.331407 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/07c6833e-dfc2-43df-812b-639533947bcb-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-glhhp\" (UID: \"07c6833e-dfc2-43df-812b-639533947bcb\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.331513 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwtc5\" (UniqueName: \"kubernetes.io/projected/07c6833e-dfc2-43df-812b-639533947bcb-kube-api-access-bwtc5\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-glhhp\" (UID: \"07c6833e-dfc2-43df-812b-639533947bcb\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.332333 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07c6833e-dfc2-43df-812b-639533947bcb-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-glhhp\" (UID: \"07c6833e-dfc2-43df-812b-639533947bcb\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.336952 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07c6833e-dfc2-43df-812b-639533947bcb-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-glhhp\" (UID: \"07c6833e-dfc2-43df-812b-639533947bcb\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.339658 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/07c6833e-dfc2-43df-812b-639533947bcb-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-glhhp\" (UID: \"07c6833e-dfc2-43df-812b-639533947bcb\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.351749 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwtc5\" (UniqueName: \"kubernetes.io/projected/07c6833e-dfc2-43df-812b-639533947bcb-kube-api-access-bwtc5\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-glhhp\" (UID: \"07c6833e-dfc2-43df-812b-639533947bcb\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp"
Jan 30 12:33:08 crc kubenswrapper[4703]: I0130 12:33:08.390840 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp"
Jan 30 12:33:09 crc kubenswrapper[4703]: I0130 12:33:09.394951 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp"]
Jan 30 12:33:09 crc kubenswrapper[4703]: I0130 12:33:09.824911 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp" event={"ID":"07c6833e-dfc2-43df-812b-639533947bcb","Type":"ContainerStarted","Data":"f456f475a8b695fb696d918f74aae40e544a30d8e6611f0d30d501081a0bef89"}
Jan 30 12:33:10 crc kubenswrapper[4703]: I0130 12:33:10.052460 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-2cgdm"]
Jan 30 12:33:10 crc kubenswrapper[4703]: I0130 12:33:10.067049 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-2cgdm"]
Jan 30 12:33:10 crc kubenswrapper[4703]: I0130 12:33:10.849404 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp" event={"ID":"07c6833e-dfc2-43df-812b-639533947bcb","Type":"ContainerStarted","Data":"4babedb81edced5f4b189ba6c5eecd7639323c3be66af3b816b267d18832fd41"}
Jan 30 12:33:10 crc kubenswrapper[4703]: I0130 12:33:10.884151 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp" podStartSLOduration=3.397285214 podStartE2EDuration="3.884107615s" podCreationTimestamp="2026-01-30 12:33:07 +0000 UTC" firstStartedPulling="2026-01-30 12:33:09.387367632 +0000 UTC m=+2225.165189286" lastFinishedPulling="2026-01-30 12:33:09.874190023 +0000 UTC m=+2225.652011687" observedRunningTime="2026-01-30 12:33:10.871441777 +0000 UTC m=+2226.649263441" watchObservedRunningTime="2026-01-30 12:33:10.884107615 +0000 UTC m=+2226.661929269"
Jan 30 12:33:11 crc kubenswrapper[4703]: I0130 12:33:11.104205 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7284929e-736f-4b9e-bce6-b2128abd47fc" path="/var/lib/kubelet/pods/7284929e-736f-4b9e-bce6-b2128abd47fc/volumes"
Jan 30 12:33:12 crc kubenswrapper[4703]: I0130 12:33:12.087304 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834"
Jan 30 12:33:12 crc kubenswrapper[4703]: E0130 12:33:12.087627 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:33:12 crc kubenswrapper[4703]: I0130 12:33:12.822966 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 30 12:33:12 crc kubenswrapper[4703]: I0130 12:33:12.823310 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 30 12:33:12 crc kubenswrapper[4703]: I0130 12:33:12.823477 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm"
Jan 30 12:33:12 crc kubenswrapper[4703]: I0130 12:33:12.824689 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5fa5c313f1cbb35a96c31064d256324b86779414c02a6d827c9dc4a8b556105a"} pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 30 12:33:12 crc kubenswrapper[4703]: I0130 12:33:12.824766 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" containerID="cri-o://5fa5c313f1cbb35a96c31064d256324b86779414c02a6d827c9dc4a8b556105a" gracePeriod=600
Jan 30 12:33:13 crc kubenswrapper[4703]: I0130 12:33:13.886998 4703 generic.go:334] "Generic (PLEG): container finished" podID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerID="5fa5c313f1cbb35a96c31064d256324b86779414c02a6d827c9dc4a8b556105a" exitCode=0
Jan 30 12:33:13 crc kubenswrapper[4703]: I0130 12:33:13.887075 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerDied","Data":"5fa5c313f1cbb35a96c31064d256324b86779414c02a6d827c9dc4a8b556105a"}
Jan 30 12:33:13 crc kubenswrapper[4703]: I0130 12:33:13.887893 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerStarted","Data":"23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"}
Jan 30 12:33:13 crc kubenswrapper[4703]: I0130 12:33:13.887924 4703 scope.go:117] "RemoveContainer" containerID="3013b1ad779b30deefbc893815000ad75dfcd101db6b8cb5069f9393dbab1127"
Jan 30 12:33:15 crc kubenswrapper[4703]: I0130 12:33:15.966273 4703 generic.go:334] "Generic (PLEG): container finished" podID="07c6833e-dfc2-43df-812b-639533947bcb" containerID="4babedb81edced5f4b189ba6c5eecd7639323c3be66af3b816b267d18832fd41" exitCode=0
Jan 30 12:33:15 crc kubenswrapper[4703]: I0130 12:33:15.966360 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp" event={"ID":"07c6833e-dfc2-43df-812b-639533947bcb","Type":"ContainerDied","Data":"4babedb81edced5f4b189ba6c5eecd7639323c3be66af3b816b267d18832fd41"}
Jan 30 12:33:17 crc kubenswrapper[4703]: I0130 12:33:17.675363 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp"
Jan 30 12:33:17 crc kubenswrapper[4703]: I0130 12:33:17.733027 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07c6833e-dfc2-43df-812b-639533947bcb-inventory\") pod \"07c6833e-dfc2-43df-812b-639533947bcb\" (UID: \"07c6833e-dfc2-43df-812b-639533947bcb\") "
Jan 30 12:33:17 crc kubenswrapper[4703]: I0130 12:33:17.733263 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bwtc5\" (UniqueName: \"kubernetes.io/projected/07c6833e-dfc2-43df-812b-639533947bcb-kube-api-access-bwtc5\") pod \"07c6833e-dfc2-43df-812b-639533947bcb\" (UID: \"07c6833e-dfc2-43df-812b-639533947bcb\") "
Jan 30 12:33:17 crc kubenswrapper[4703]: I0130 12:33:17.733297 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/07c6833e-dfc2-43df-812b-639533947bcb-ssh-key-openstack-edpm-ipam\") pod \"07c6833e-dfc2-43df-812b-639533947bcb\" (UID: \"07c6833e-dfc2-43df-812b-639533947bcb\") "
Jan 30 12:33:17 crc kubenswrapper[4703]: I0130 12:33:17.742161 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07c6833e-dfc2-43df-812b-639533947bcb-kube-api-access-bwtc5" (OuterVolumeSpecName: "kube-api-access-bwtc5") pod "07c6833e-dfc2-43df-812b-639533947bcb" (UID: "07c6833e-dfc2-43df-812b-639533947bcb"). InnerVolumeSpecName "kube-api-access-bwtc5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:33:17 crc kubenswrapper[4703]: I0130 12:33:17.772530 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07c6833e-dfc2-43df-812b-639533947bcb-inventory" (OuterVolumeSpecName: "inventory") pod "07c6833e-dfc2-43df-812b-639533947bcb" (UID: "07c6833e-dfc2-43df-812b-639533947bcb"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:33:17 crc kubenswrapper[4703]: I0130 12:33:17.775487 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07c6833e-dfc2-43df-812b-639533947bcb-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "07c6833e-dfc2-43df-812b-639533947bcb" (UID: "07c6833e-dfc2-43df-812b-639533947bcb"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:33:17 crc kubenswrapper[4703]: I0130 12:33:17.836929 4703 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07c6833e-dfc2-43df-812b-639533947bcb-inventory\") on node \"crc\" DevicePath \"\""
Jan 30 12:33:17 crc kubenswrapper[4703]: I0130 12:33:17.836985 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bwtc5\" (UniqueName: \"kubernetes.io/projected/07c6833e-dfc2-43df-812b-639533947bcb-kube-api-access-bwtc5\") on node \"crc\" DevicePath \"\""
Jan 30 12:33:17 crc kubenswrapper[4703]: I0130 12:33:17.837002 4703 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/07c6833e-dfc2-43df-812b-639533947bcb-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.157763 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp" event={"ID":"07c6833e-dfc2-43df-812b-639533947bcb","Type":"ContainerDied","Data":"f456f475a8b695fb696d918f74aae40e544a30d8e6611f0d30d501081a0bef89"}
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.157839 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f456f475a8b695fb696d918f74aae40e544a30d8e6611f0d30d501081a0bef89"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.157939 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-glhhp"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.219808 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b"]
Jan 30 12:33:18 crc kubenswrapper[4703]: E0130 12:33:18.220813 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07c6833e-dfc2-43df-812b-639533947bcb" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.220928 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="07c6833e-dfc2-43df-812b-639533947bcb" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.221258 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="07c6833e-dfc2-43df-812b-639533947bcb" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.222208 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.232026 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.234044 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.234587 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-jjdl5"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.234659 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.234587 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b"]
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.396881 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3dc7e43f-3f6e-4d19-970d-bd84acc019bd-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-zbg8b\" (UID: \"3dc7e43f-3f6e-4d19-970d-bd84acc019bd\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.397138 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3dc7e43f-3f6e-4d19-970d-bd84acc019bd-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-zbg8b\" (UID: \"3dc7e43f-3f6e-4d19-970d-bd84acc019bd\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.397186 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmr4d\" (UniqueName: \"kubernetes.io/projected/3dc7e43f-3f6e-4d19-970d-bd84acc019bd-kube-api-access-tmr4d\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-zbg8b\" (UID: \"3dc7e43f-3f6e-4d19-970d-bd84acc019bd\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.499839 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3dc7e43f-3f6e-4d19-970d-bd84acc019bd-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-zbg8b\" (UID: \"3dc7e43f-3f6e-4d19-970d-bd84acc019bd\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.500388 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3dc7e43f-3f6e-4d19-970d-bd84acc019bd-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-zbg8b\" (UID: \"3dc7e43f-3f6e-4d19-970d-bd84acc019bd\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.500539 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmr4d\" (UniqueName: \"kubernetes.io/projected/3dc7e43f-3f6e-4d19-970d-bd84acc019bd-kube-api-access-tmr4d\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-zbg8b\" (UID: \"3dc7e43f-3f6e-4d19-970d-bd84acc019bd\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.507051 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3dc7e43f-3f6e-4d19-970d-bd84acc019bd-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-zbg8b\" (UID: \"3dc7e43f-3f6e-4d19-970d-bd84acc019bd\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.507158 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3dc7e43f-3f6e-4d19-970d-bd84acc019bd-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-zbg8b\" (UID: \"3dc7e43f-3f6e-4d19-970d-bd84acc019bd\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.525887 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmr4d\" (UniqueName: \"kubernetes.io/projected/3dc7e43f-3f6e-4d19-970d-bd84acc019bd-kube-api-access-tmr4d\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-zbg8b\" (UID: \"3dc7e43f-3f6e-4d19-970d-bd84acc019bd\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b"
Jan 30 12:33:18 crc kubenswrapper[4703]: I0130 12:33:18.616038 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b"
Jan 30 12:33:19 crc kubenswrapper[4703]: I0130 12:33:19.232255 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b"]
Jan 30 12:33:19 crc kubenswrapper[4703]: W0130 12:33:19.250297 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3dc7e43f_3f6e_4d19_970d_bd84acc019bd.slice/crio-27bfc07b01e83966f80dd92f9b04ea59a0c27aa135c6ad90f093f1367219ae4f WatchSource:0}: Error finding container 27bfc07b01e83966f80dd92f9b04ea59a0c27aa135c6ad90f093f1367219ae4f: Status 404 returned error can't find the container with id 27bfc07b01e83966f80dd92f9b04ea59a0c27aa135c6ad90f093f1367219ae4f
Jan 30 12:33:20 crc kubenswrapper[4703]: I0130 12:33:20.184829 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b" event={"ID":"3dc7e43f-3f6e-4d19-970d-bd84acc019bd","Type":"ContainerStarted","Data":"27bfc07b01e83966f80dd92f9b04ea59a0c27aa135c6ad90f093f1367219ae4f"}
Jan 30 12:33:21 crc kubenswrapper[4703]: I0130 12:33:21.201538 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b" event={"ID":"3dc7e43f-3f6e-4d19-970d-bd84acc019bd","Type":"ContainerStarted","Data":"a7bbaaa9ce8deb36c867d1bffb4249cc38b7698a73d3e06ee7240336d98dfc29"}
Jan 30 12:33:21 crc kubenswrapper[4703]: I0130 12:33:21.231859 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b" podStartSLOduration=2.815715103 podStartE2EDuration="3.231831919s" podCreationTimestamp="2026-01-30 12:33:18 +0000 UTC" firstStartedPulling="2026-01-30 12:33:19.257173593 +0000 UTC m=+2235.034995247" lastFinishedPulling="2026-01-30 
12:33:19.673290409 +0000 UTC m=+2235.451112063" observedRunningTime="2026-01-30 12:33:21.224403931 +0000 UTC m=+2237.002225585" watchObservedRunningTime="2026-01-30 12:33:21.231831919 +0000 UTC m=+2237.009653573"
Jan 30 12:33:26 crc kubenswrapper[4703]: I0130 12:33:26.087649 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834"
Jan 30 12:33:26 crc kubenswrapper[4703]: E0130 12:33:26.088932 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:33:37 crc kubenswrapper[4703]: I0130 12:33:37.087365 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834"
Jan 30 12:33:37 crc kubenswrapper[4703]: E0130 12:33:37.088889 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:33:38 crc kubenswrapper[4703]: I0130 12:33:38.073611 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-d9t7g"]
Jan 30 12:33:38 crc kubenswrapper[4703]: I0130 12:33:38.085760 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-d9t7g"]
Jan 30 12:33:39 crc kubenswrapper[4703]: I0130 12:33:39.040167 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-wbn8k"]
Jan 30 12:33:39 crc kubenswrapper[4703]: I0130 12:33:39.050609 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-wbn8k"]
Jan 30 12:33:39 crc kubenswrapper[4703]: I0130 12:33:39.101510 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40732e3a-846a-44ee-bf77-7c90e541af74" path="/var/lib/kubelet/pods/40732e3a-846a-44ee-bf77-7c90e541af74/volumes"
Jan 30 12:33:39 crc kubenswrapper[4703]: I0130 12:33:39.102566 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93b18574-a5a2-4317-b40d-eb021590ac96" path="/var/lib/kubelet/pods/93b18574-a5a2-4317-b40d-eb021590ac96/volumes"
Jan 30 12:33:52 crc kubenswrapper[4703]: I0130 12:33:52.087752 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834"
Jan 30 12:33:52 crc kubenswrapper[4703]: E0130 12:33:52.088958 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:33:53 crc kubenswrapper[4703]: I0130 12:33:53.845490 4703 scope.go:117] "RemoveContainer" containerID="f88fad3c745401aea84ad3bfafd365ae7f8cc354c2b585857b1eb141f5688b84"
Jan 30 12:33:54 crc kubenswrapper[4703]: I0130 12:33:54.016763 4703 scope.go:117] "RemoveContainer" containerID="564df5fc426738750afcd0688e8875ff98cb65017665db03b209bc8c51021e9c"
Jan 30 12:33:54 crc kubenswrapper[4703]: I0130 12:33:54.076021 4703 scope.go:117] "RemoveContainer" containerID="f07407a994b4eb22db96cc6f20185276cc4464eedc124843068a40cc0bc0699b"
Jan 30 12:33:59 crc kubenswrapper[4703]: I0130 12:33:59.883057 4703 generic.go:334] "Generic (PLEG): container finished" podID="3dc7e43f-3f6e-4d19-970d-bd84acc019bd" containerID="a7bbaaa9ce8deb36c867d1bffb4249cc38b7698a73d3e06ee7240336d98dfc29" exitCode=0
Jan 30 12:33:59 crc kubenswrapper[4703]: I0130 12:33:59.883257 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b" event={"ID":"3dc7e43f-3f6e-4d19-970d-bd84acc019bd","Type":"ContainerDied","Data":"a7bbaaa9ce8deb36c867d1bffb4249cc38b7698a73d3e06ee7240336d98dfc29"}
Jan 30 12:34:01 crc kubenswrapper[4703]: I0130 12:34:01.375262 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b"
Jan 30 12:34:01 crc kubenswrapper[4703]: I0130 12:34:01.559011 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmr4d\" (UniqueName: \"kubernetes.io/projected/3dc7e43f-3f6e-4d19-970d-bd84acc019bd-kube-api-access-tmr4d\") pod \"3dc7e43f-3f6e-4d19-970d-bd84acc019bd\" (UID: \"3dc7e43f-3f6e-4d19-970d-bd84acc019bd\") "
Jan 30 12:34:01 crc kubenswrapper[4703]: I0130 12:34:01.559297 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3dc7e43f-3f6e-4d19-970d-bd84acc019bd-ssh-key-openstack-edpm-ipam\") pod \"3dc7e43f-3f6e-4d19-970d-bd84acc019bd\" (UID: \"3dc7e43f-3f6e-4d19-970d-bd84acc019bd\") "
Jan 30 12:34:01 crc kubenswrapper[4703]: I0130 12:34:01.559327 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3dc7e43f-3f6e-4d19-970d-bd84acc019bd-inventory\") pod \"3dc7e43f-3f6e-4d19-970d-bd84acc019bd\" (UID: \"3dc7e43f-3f6e-4d19-970d-bd84acc019bd\") "
Jan 30 12:34:01 crc kubenswrapper[4703]: I0130 12:34:01.568213 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3dc7e43f-3f6e-4d19-970d-bd84acc019bd-kube-api-access-tmr4d" (OuterVolumeSpecName: "kube-api-access-tmr4d") pod "3dc7e43f-3f6e-4d19-970d-bd84acc019bd" (UID: "3dc7e43f-3f6e-4d19-970d-bd84acc019bd"). InnerVolumeSpecName "kube-api-access-tmr4d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:34:01 crc kubenswrapper[4703]: I0130 12:34:01.609438 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3dc7e43f-3f6e-4d19-970d-bd84acc019bd-inventory" (OuterVolumeSpecName: "inventory") pod "3dc7e43f-3f6e-4d19-970d-bd84acc019bd" (UID: "3dc7e43f-3f6e-4d19-970d-bd84acc019bd"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:34:01 crc kubenswrapper[4703]: I0130 12:34:01.610808 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3dc7e43f-3f6e-4d19-970d-bd84acc019bd-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "3dc7e43f-3f6e-4d19-970d-bd84acc019bd" (UID: "3dc7e43f-3f6e-4d19-970d-bd84acc019bd"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:34:01 crc kubenswrapper[4703]: I0130 12:34:01.662274 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmr4d\" (UniqueName: \"kubernetes.io/projected/3dc7e43f-3f6e-4d19-970d-bd84acc019bd-kube-api-access-tmr4d\") on node \"crc\" DevicePath \"\""
Jan 30 12:34:01 crc kubenswrapper[4703]: I0130 12:34:01.662321 4703 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/3dc7e43f-3f6e-4d19-970d-bd84acc019bd-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 30 12:34:01 crc kubenswrapper[4703]: I0130 12:34:01.662334 4703 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3dc7e43f-3f6e-4d19-970d-bd84acc019bd-inventory\") on node \"crc\" DevicePath \"\""
Jan 30 12:34:01 crc kubenswrapper[4703]: I0130 12:34:01.908976 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b" event={"ID":"3dc7e43f-3f6e-4d19-970d-bd84acc019bd","Type":"ContainerDied","Data":"27bfc07b01e83966f80dd92f9b04ea59a0c27aa135c6ad90f093f1367219ae4f"}
Jan 30 12:34:01 crc kubenswrapper[4703]: I0130 12:34:01.909024 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-zbg8b"
Jan 30 12:34:01 crc kubenswrapper[4703]: I0130 12:34:01.909044 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27bfc07b01e83966f80dd92f9b04ea59a0c27aa135c6ad90f093f1367219ae4f"
Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.063842 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7"]
Jan 30 12:34:02 crc kubenswrapper[4703]: E0130 12:34:02.065089 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dc7e43f-3f6e-4d19-970d-bd84acc019bd" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.065151 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dc7e43f-3f6e-4d19-970d-bd84acc019bd" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.065467 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="3dc7e43f-3f6e-4d19-970d-bd84acc019bd" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.066844 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7"
Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.076750 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.076919 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.077181 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.077181 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-jjdl5"
Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.078274 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7"]
Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.174860 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dbcb8ade-a107-4435-8ee5-e27e4bb95998-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-42gg7\" (UID: \"dbcb8ade-a107-4435-8ee5-e27e4bb95998\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7"
Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.174946 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsf6l\" (UniqueName: \"kubernetes.io/projected/dbcb8ade-a107-4435-8ee5-e27e4bb95998-kube-api-access-xsf6l\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-42gg7\" (UID: \"dbcb8ade-a107-4435-8ee5-e27e4bb95998\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7"
Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.175624 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/dbcb8ade-a107-4435-8ee5-e27e4bb95998-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-42gg7\" (UID: \"dbcb8ade-a107-4435-8ee5-e27e4bb95998\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7"
Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.278570 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dbcb8ade-a107-4435-8ee5-e27e4bb95998-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-42gg7\" (UID: \"dbcb8ade-a107-4435-8ee5-e27e4bb95998\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7"
Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.278664 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsf6l\" (UniqueName: \"kubernetes.io/projected/dbcb8ade-a107-4435-8ee5-e27e4bb95998-kube-api-access-xsf6l\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-42gg7\" (UID: \"dbcb8ade-a107-4435-8ee5-e27e4bb95998\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7"
Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.278836 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: 
\"kubernetes.io/secret/dbcb8ade-a107-4435-8ee5-e27e4bb95998-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-42gg7\" (UID: \"dbcb8ade-a107-4435-8ee5-e27e4bb95998\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7" Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.285686 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dbcb8ade-a107-4435-8ee5-e27e4bb95998-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-42gg7\" (UID: \"dbcb8ade-a107-4435-8ee5-e27e4bb95998\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7" Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.286156 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/dbcb8ade-a107-4435-8ee5-e27e4bb95998-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-42gg7\" (UID: \"dbcb8ade-a107-4435-8ee5-e27e4bb95998\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7" Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.298175 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsf6l\" (UniqueName: \"kubernetes.io/projected/dbcb8ade-a107-4435-8ee5-e27e4bb95998-kube-api-access-xsf6l\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-42gg7\" (UID: \"dbcb8ade-a107-4435-8ee5-e27e4bb95998\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7" Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.388869 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7" Jan 30 12:34:02 crc kubenswrapper[4703]: I0130 12:34:02.961450 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7"] Jan 30 12:34:03 crc kubenswrapper[4703]: I0130 12:34:03.960163 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7" event={"ID":"dbcb8ade-a107-4435-8ee5-e27e4bb95998","Type":"ContainerStarted","Data":"12bda7940249c06720cf9995c1f47a4ee271b62dfeb8b1befb1fb13efa0e3ebd"} Jan 30 12:34:04 crc kubenswrapper[4703]: I0130 12:34:04.972767 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7" event={"ID":"dbcb8ade-a107-4435-8ee5-e27e4bb95998","Type":"ContainerStarted","Data":"4a3c472bc578e2fab81b91790490aff7d1c52689bdbf7bbcd5335c1724620bf7"} Jan 30 12:34:04 crc kubenswrapper[4703]: I0130 12:34:04.993776 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7" podStartSLOduration=1.964453346 podStartE2EDuration="2.993750163s" podCreationTimestamp="2026-01-30 12:34:02 +0000 UTC" firstStartedPulling="2026-01-30 12:34:02.969690249 +0000 UTC m=+2278.747511893" lastFinishedPulling="2026-01-30 12:34:03.998987056 +0000 UTC m=+2279.776808710" observedRunningTime="2026-01-30 12:34:04.988193195 +0000 UTC m=+2280.766014859" watchObservedRunningTime="2026-01-30 12:34:04.993750163 +0000 UTC m=+2280.771571817" Jan 30 12:34:07 crc kubenswrapper[4703]: I0130 12:34:07.086992 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834" Jan 30 12:34:07 crc kubenswrapper[4703]: 
E0130 12:34:07.087835 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:34:19 crc kubenswrapper[4703]: I0130 12:34:19.235336 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6j7xc"] Jan 30 12:34:19 crc kubenswrapper[4703]: I0130 12:34:19.239311 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6j7xc" Jan 30 12:34:19 crc kubenswrapper[4703]: I0130 12:34:19.264347 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6j7xc"] Jan 30 12:34:19 crc kubenswrapper[4703]: I0130 12:34:19.336955 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e630863e-145f-46a3-bf90-37ec07c440da-catalog-content\") pod \"redhat-marketplace-6j7xc\" (UID: \"e630863e-145f-46a3-bf90-37ec07c440da\") " pod="openshift-marketplace/redhat-marketplace-6j7xc" Jan 30 12:34:19 crc kubenswrapper[4703]: I0130 12:34:19.337259 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e630863e-145f-46a3-bf90-37ec07c440da-utilities\") pod \"redhat-marketplace-6j7xc\" (UID: \"e630863e-145f-46a3-bf90-37ec07c440da\") " pod="openshift-marketplace/redhat-marketplace-6j7xc" Jan 30 12:34:19 crc kubenswrapper[4703]: I0130 12:34:19.337335 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7zqc\" (UniqueName: \"kubernetes.io/projected/e630863e-145f-46a3-bf90-37ec07c440da-kube-api-access-q7zqc\") pod \"redhat-marketplace-6j7xc\" (UID: \"e630863e-145f-46a3-bf90-37ec07c440da\") " pod="openshift-marketplace/redhat-marketplace-6j7xc" Jan 30 12:34:19 crc kubenswrapper[4703]: I0130 12:34:19.439755 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e630863e-145f-46a3-bf90-37ec07c440da-catalog-content\") pod \"redhat-marketplace-6j7xc\" (UID: \"e630863e-145f-46a3-bf90-37ec07c440da\") " pod="openshift-marketplace/redhat-marketplace-6j7xc" Jan 30 12:34:19 crc kubenswrapper[4703]: I0130 12:34:19.439981 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e630863e-145f-46a3-bf90-37ec07c440da-utilities\") pod \"redhat-marketplace-6j7xc\" (UID: \"e630863e-145f-46a3-bf90-37ec07c440da\") " pod="openshift-marketplace/redhat-marketplace-6j7xc" Jan 30 12:34:19 crc kubenswrapper[4703]: I0130 12:34:19.440051 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7zqc\" (UniqueName: \"kubernetes.io/projected/e630863e-145f-46a3-bf90-37ec07c440da-kube-api-access-q7zqc\") pod \"redhat-marketplace-6j7xc\" (UID: \"e630863e-145f-46a3-bf90-37ec07c440da\") " pod="openshift-marketplace/redhat-marketplace-6j7xc" Jan 30 12:34:19 crc kubenswrapper[4703]: I0130 12:34:19.440403 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/e630863e-145f-46a3-bf90-37ec07c440da-catalog-content\") pod \"redhat-marketplace-6j7xc\" (UID: \"e630863e-145f-46a3-bf90-37ec07c440da\") " pod="openshift-marketplace/redhat-marketplace-6j7xc" Jan 30 12:34:19 crc kubenswrapper[4703]: I0130 12:34:19.440644 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e630863e-145f-46a3-bf90-37ec07c440da-utilities\") pod \"redhat-marketplace-6j7xc\" (UID: \"e630863e-145f-46a3-bf90-37ec07c440da\") " pod="openshift-marketplace/redhat-marketplace-6j7xc" Jan 30 12:34:19 crc kubenswrapper[4703]: I0130 12:34:19.467254 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7zqc\" (UniqueName: \"kubernetes.io/projected/e630863e-145f-46a3-bf90-37ec07c440da-kube-api-access-q7zqc\") pod \"redhat-marketplace-6j7xc\" (UID: \"e630863e-145f-46a3-bf90-37ec07c440da\") " pod="openshift-marketplace/redhat-marketplace-6j7xc" Jan 30 12:34:19 crc kubenswrapper[4703]: I0130 12:34:19.571392 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6j7xc" Jan 30 12:34:20 crc kubenswrapper[4703]: I0130 12:34:20.087144 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834" Jan 30 12:34:20 crc kubenswrapper[4703]: E0130 12:34:20.087999 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:34:20 crc kubenswrapper[4703]: I0130 12:34:20.194458 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6j7xc"] Jan 30 12:34:20 crc kubenswrapper[4703]: W0130 12:34:20.201325 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode630863e_145f_46a3_bf90_37ec07c440da.slice/crio-a5d8f773ee44b810694009b4d162b66e842d450a28d28a47a91ce8e2d0f79883 WatchSource:0}: Error finding container a5d8f773ee44b810694009b4d162b66e842d450a28d28a47a91ce8e2d0f79883: Status 404 returned error can't find the container with id a5d8f773ee44b810694009b4d162b66e842d450a28d28a47a91ce8e2d0f79883 Jan 30 12:34:21 crc kubenswrapper[4703]: I0130 12:34:21.139024 4703 generic.go:334] "Generic (PLEG): container finished" podID="e630863e-145f-46a3-bf90-37ec07c440da" containerID="a70be92c5faeaed7f6e1ce526ed14902ff185ffdecbf3571ea7f25181473b693" exitCode=0 Jan 30 12:34:21 crc kubenswrapper[4703]: I0130 12:34:21.139136 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6j7xc" event={"ID":"e630863e-145f-46a3-bf90-37ec07c440da","Type":"ContainerDied","Data":"a70be92c5faeaed7f6e1ce526ed14902ff185ffdecbf3571ea7f25181473b693"} Jan 30 12:34:21 crc kubenswrapper[4703]: I0130 12:34:21.139426 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6j7xc" event={"ID":"e630863e-145f-46a3-bf90-37ec07c440da","Type":"ContainerStarted","Data":"a5d8f773ee44b810694009b4d162b66e842d450a28d28a47a91ce8e2d0f79883"} Jan 30 12:34:23 crc kubenswrapper[4703]: I0130 12:34:23.165936 4703 generic.go:334] "Generic (PLEG): container finished" 
podID="e630863e-145f-46a3-bf90-37ec07c440da" containerID="7ba723344e4361de65aed1793ab8fcdc3ab909f69b6e6288199a26a447d54c5e" exitCode=0 Jan 30 12:34:23 crc kubenswrapper[4703]: I0130 12:34:23.166031 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6j7xc" event={"ID":"e630863e-145f-46a3-bf90-37ec07c440da","Type":"ContainerDied","Data":"7ba723344e4361de65aed1793ab8fcdc3ab909f69b6e6288199a26a447d54c5e"} Jan 30 12:34:24 crc kubenswrapper[4703]: I0130 12:34:24.060759 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-gxvg2"] Jan 30 12:34:24 crc kubenswrapper[4703]: I0130 12:34:24.071749 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-gxvg2"] Jan 30 12:34:24 crc kubenswrapper[4703]: I0130 12:34:24.183078 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6j7xc" event={"ID":"e630863e-145f-46a3-bf90-37ec07c440da","Type":"ContainerStarted","Data":"3e97c6fa2cb7e071bf913a2027f0a70eaab635c428efe8fd229c551d40a0df89"} Jan 30 12:34:24 crc kubenswrapper[4703]: I0130 12:34:24.212692 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6j7xc" podStartSLOduration=2.732674645 podStartE2EDuration="5.212664407s" podCreationTimestamp="2026-01-30 12:34:19 +0000 UTC" firstStartedPulling="2026-01-30 12:34:21.143468183 +0000 UTC m=+2296.921289847" lastFinishedPulling="2026-01-30 12:34:23.623457955 +0000 UTC m=+2299.401279609" observedRunningTime="2026-01-30 12:34:24.207448338 +0000 UTC m=+2299.985270022" watchObservedRunningTime="2026-01-30 12:34:24.212664407 +0000 UTC m=+2299.990486061" Jan 30 12:34:25 crc kubenswrapper[4703]: I0130 12:34:25.101315 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38c055a8-b3f5-4648-acf9-8e0bdc3b4c51" path="/var/lib/kubelet/pods/38c055a8-b3f5-4648-acf9-8e0bdc3b4c51/volumes" Jan 30 12:34:29 crc kubenswrapper[4703]: I0130 12:34:29.571493 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6j7xc" Jan 30 12:34:29 crc kubenswrapper[4703]: I0130 12:34:29.573337 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6j7xc" Jan 30 12:34:29 crc kubenswrapper[4703]: I0130 12:34:29.630661 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6j7xc" Jan 30 12:34:30 crc kubenswrapper[4703]: I0130 12:34:30.301388 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6j7xc" Jan 30 12:34:30 crc kubenswrapper[4703]: I0130 12:34:30.371066 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6j7xc"] Jan 30 12:34:32 crc kubenswrapper[4703]: I0130 12:34:32.391279 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-6j7xc" podUID="e630863e-145f-46a3-bf90-37ec07c440da" containerName="registry-server" containerID="cri-o://3e97c6fa2cb7e071bf913a2027f0a70eaab635c428efe8fd229c551d40a0df89" gracePeriod=2 Jan 30 12:34:32 crc kubenswrapper[4703]: I0130 12:34:32.929277 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6j7xc" Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.014576 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e630863e-145f-46a3-bf90-37ec07c440da-catalog-content\") pod \"e630863e-145f-46a3-bf90-37ec07c440da\" (UID: \"e630863e-145f-46a3-bf90-37ec07c440da\") " Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.014672 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e630863e-145f-46a3-bf90-37ec07c440da-utilities\") pod \"e630863e-145f-46a3-bf90-37ec07c440da\" (UID: \"e630863e-145f-46a3-bf90-37ec07c440da\") " Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.014836 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7zqc\" (UniqueName: \"kubernetes.io/projected/e630863e-145f-46a3-bf90-37ec07c440da-kube-api-access-q7zqc\") pod \"e630863e-145f-46a3-bf90-37ec07c440da\" (UID: \"e630863e-145f-46a3-bf90-37ec07c440da\") " Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.018087 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e630863e-145f-46a3-bf90-37ec07c440da-utilities" (OuterVolumeSpecName: "utilities") pod "e630863e-145f-46a3-bf90-37ec07c440da" (UID: "e630863e-145f-46a3-bf90-37ec07c440da"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.026753 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e630863e-145f-46a3-bf90-37ec07c440da-kube-api-access-q7zqc" (OuterVolumeSpecName: "kube-api-access-q7zqc") pod "e630863e-145f-46a3-bf90-37ec07c440da" (UID: "e630863e-145f-46a3-bf90-37ec07c440da"). InnerVolumeSpecName "kube-api-access-q7zqc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.043647 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e630863e-145f-46a3-bf90-37ec07c440da-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e630863e-145f-46a3-bf90-37ec07c440da" (UID: "e630863e-145f-46a3-bf90-37ec07c440da"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.117653 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7zqc\" (UniqueName: \"kubernetes.io/projected/e630863e-145f-46a3-bf90-37ec07c440da-kube-api-access-q7zqc\") on node \"crc\" DevicePath \"\"" Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.117693 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e630863e-145f-46a3-bf90-37ec07c440da-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.117707 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e630863e-145f-46a3-bf90-37ec07c440da-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.405761 4703 generic.go:334] "Generic (PLEG): container finished" podID="e630863e-145f-46a3-bf90-37ec07c440da" containerID="3e97c6fa2cb7e071bf913a2027f0a70eaab635c428efe8fd229c551d40a0df89" exitCode=0 Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.405855 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6j7xc" event={"ID":"e630863e-145f-46a3-bf90-37ec07c440da","Type":"ContainerDied","Data":"3e97c6fa2cb7e071bf913a2027f0a70eaab635c428efe8fd229c551d40a0df89"} Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.405889 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6j7xc" Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.405944 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6j7xc" event={"ID":"e630863e-145f-46a3-bf90-37ec07c440da","Type":"ContainerDied","Data":"a5d8f773ee44b810694009b4d162b66e842d450a28d28a47a91ce8e2d0f79883"} Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.405969 4703 scope.go:117] "RemoveContainer" containerID="3e97c6fa2cb7e071bf913a2027f0a70eaab635c428efe8fd229c551d40a0df89" Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.430762 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-6j7xc"] Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.441669 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-6j7xc"] Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.447596 4703 scope.go:117] "RemoveContainer" containerID="7ba723344e4361de65aed1793ab8fcdc3ab909f69b6e6288199a26a447d54c5e" Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.473983 4703 scope.go:117] "RemoveContainer" containerID="a70be92c5faeaed7f6e1ce526ed14902ff185ffdecbf3571ea7f25181473b693" Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.638638 4703 scope.go:117] "RemoveContainer" containerID="3e97c6fa2cb7e071bf913a2027f0a70eaab635c428efe8fd229c551d40a0df89" Jan 30 12:34:33 crc kubenswrapper[4703]: E0130 12:34:33.639813 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e97c6fa2cb7e071bf913a2027f0a70eaab635c428efe8fd229c551d40a0df89\": container with ID starting with 3e97c6fa2cb7e071bf913a2027f0a70eaab635c428efe8fd229c551d40a0df89 not found: ID does not exist" containerID="3e97c6fa2cb7e071bf913a2027f0a70eaab635c428efe8fd229c551d40a0df89" Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.639848 4703 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e97c6fa2cb7e071bf913a2027f0a70eaab635c428efe8fd229c551d40a0df89"} err="failed to get container status \"3e97c6fa2cb7e071bf913a2027f0a70eaab635c428efe8fd229c551d40a0df89\": rpc error: code = NotFound desc = could not find container \"3e97c6fa2cb7e071bf913a2027f0a70eaab635c428efe8fd229c551d40a0df89\": container with ID starting with 3e97c6fa2cb7e071bf913a2027f0a70eaab635c428efe8fd229c551d40a0df89 not found: ID does not exist" Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.639877 4703 scope.go:117] "RemoveContainer" containerID="7ba723344e4361de65aed1793ab8fcdc3ab909f69b6e6288199a26a447d54c5e" Jan 30 12:34:33 crc kubenswrapper[4703]: E0130 12:34:33.640563 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ba723344e4361de65aed1793ab8fcdc3ab909f69b6e6288199a26a447d54c5e\": container with ID starting with 7ba723344e4361de65aed1793ab8fcdc3ab909f69b6e6288199a26a447d54c5e not found: ID does not exist" containerID="7ba723344e4361de65aed1793ab8fcdc3ab909f69b6e6288199a26a447d54c5e" Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.640617 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ba723344e4361de65aed1793ab8fcdc3ab909f69b6e6288199a26a447d54c5e"} err="failed to get container status \"7ba723344e4361de65aed1793ab8fcdc3ab909f69b6e6288199a26a447d54c5e\": rpc error: code = NotFound desc = could not find container \"7ba723344e4361de65aed1793ab8fcdc3ab909f69b6e6288199a26a447d54c5e\": container with ID starting with 7ba723344e4361de65aed1793ab8fcdc3ab909f69b6e6288199a26a447d54c5e not found: ID does not exist" Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.640637 4703 scope.go:117] "RemoveContainer" containerID="a70be92c5faeaed7f6e1ce526ed14902ff185ffdecbf3571ea7f25181473b693" Jan 30 12:34:33 crc kubenswrapper[4703]: E0130 12:34:33.641154 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a70be92c5faeaed7f6e1ce526ed14902ff185ffdecbf3571ea7f25181473b693\": container with ID starting with a70be92c5faeaed7f6e1ce526ed14902ff185ffdecbf3571ea7f25181473b693 not found: ID does not exist" containerID="a70be92c5faeaed7f6e1ce526ed14902ff185ffdecbf3571ea7f25181473b693" Jan 30 12:34:33 crc kubenswrapper[4703]: I0130 12:34:33.641191 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a70be92c5faeaed7f6e1ce526ed14902ff185ffdecbf3571ea7f25181473b693"} err="failed to get container status \"a70be92c5faeaed7f6e1ce526ed14902ff185ffdecbf3571ea7f25181473b693\": rpc error: code = NotFound desc = could not find container \"a70be92c5faeaed7f6e1ce526ed14902ff185ffdecbf3571ea7f25181473b693\": container with ID starting with a70be92c5faeaed7f6e1ce526ed14902ff185ffdecbf3571ea7f25181473b693 not found: ID does not exist" Jan 30 12:34:35 crc kubenswrapper[4703]: I0130 12:34:35.094277 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834" Jan 30 12:34:35 crc kubenswrapper[4703]: E0130 12:34:35.095141 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" 
pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:34:35 crc kubenswrapper[4703]: I0130 12:34:35.098988 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e630863e-145f-46a3-bf90-37ec07c440da" path="/var/lib/kubelet/pods/e630863e-145f-46a3-bf90-37ec07c440da/volumes" Jan 30 12:34:49 crc kubenswrapper[4703]: I0130 12:34:49.088055 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834" Jan 30 12:34:49 crc kubenswrapper[4703]: E0130 12:34:49.089372 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:34:54 crc kubenswrapper[4703]: I0130 12:34:54.231867 4703 scope.go:117] "RemoveContainer" containerID="253a146d77ad5c7781999b3fb1c220eefbd487e84ece1f3a1c3a39c289c862f8" Jan 30 12:34:54 crc kubenswrapper[4703]: I0130 12:34:54.632718 4703 generic.go:334] "Generic (PLEG): container finished" podID="dbcb8ade-a107-4435-8ee5-e27e4bb95998" containerID="4a3c472bc578e2fab81b91790490aff7d1c52689bdbf7bbcd5335c1724620bf7" exitCode=0 Jan 30 12:34:54 crc kubenswrapper[4703]: I0130 12:34:54.632822 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7" event={"ID":"dbcb8ade-a107-4435-8ee5-e27e4bb95998","Type":"ContainerDied","Data":"4a3c472bc578e2fab81b91790490aff7d1c52689bdbf7bbcd5335c1724620bf7"} Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.106975 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.262570 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dbcb8ade-a107-4435-8ee5-e27e4bb95998-inventory\") pod \"dbcb8ade-a107-4435-8ee5-e27e4bb95998\" (UID: \"dbcb8ade-a107-4435-8ee5-e27e4bb95998\") " Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.262702 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/dbcb8ade-a107-4435-8ee5-e27e4bb95998-ssh-key-openstack-edpm-ipam\") pod \"dbcb8ade-a107-4435-8ee5-e27e4bb95998\" (UID: \"dbcb8ade-a107-4435-8ee5-e27e4bb95998\") " Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.262790 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xsf6l\" (UniqueName: \"kubernetes.io/projected/dbcb8ade-a107-4435-8ee5-e27e4bb95998-kube-api-access-xsf6l\") pod \"dbcb8ade-a107-4435-8ee5-e27e4bb95998\" (UID: \"dbcb8ade-a107-4435-8ee5-e27e4bb95998\") " Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.270534 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbcb8ade-a107-4435-8ee5-e27e4bb95998-kube-api-access-xsf6l" (OuterVolumeSpecName: "kube-api-access-xsf6l") pod "dbcb8ade-a107-4435-8ee5-e27e4bb95998" (UID: "dbcb8ade-a107-4435-8ee5-e27e4bb95998"). InnerVolumeSpecName "kube-api-access-xsf6l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.299735 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbcb8ade-a107-4435-8ee5-e27e4bb95998-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "dbcb8ade-a107-4435-8ee5-e27e4bb95998" (UID: "dbcb8ade-a107-4435-8ee5-e27e4bb95998"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.300037 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbcb8ade-a107-4435-8ee5-e27e4bb95998-inventory" (OuterVolumeSpecName: "inventory") pod "dbcb8ade-a107-4435-8ee5-e27e4bb95998" (UID: "dbcb8ade-a107-4435-8ee5-e27e4bb95998"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.365990 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xsf6l\" (UniqueName: \"kubernetes.io/projected/dbcb8ade-a107-4435-8ee5-e27e4bb95998-kube-api-access-xsf6l\") on node \"crc\" DevicePath \"\"" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.366031 4703 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dbcb8ade-a107-4435-8ee5-e27e4bb95998-inventory\") on node \"crc\" DevicePath \"\"" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.366043 4703 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/dbcb8ade-a107-4435-8ee5-e27e4bb95998-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.659108 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7" event={"ID":"dbcb8ade-a107-4435-8ee5-e27e4bb95998","Type":"ContainerDied","Data":"12bda7940249c06720cf9995c1f47a4ee271b62dfeb8b1befb1fb13efa0e3ebd"} Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.659200 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="12bda7940249c06720cf9995c1f47a4ee271b62dfeb8b1befb1fb13efa0e3ebd" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.659435 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-42gg7" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.769583 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-7x7vm"] Jan 30 12:34:56 crc kubenswrapper[4703]: E0130 12:34:56.773160 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbcb8ade-a107-4435-8ee5-e27e4bb95998" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.773187 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbcb8ade-a107-4435-8ee5-e27e4bb95998" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 30 12:34:56 crc kubenswrapper[4703]: E0130 12:34:56.773198 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e630863e-145f-46a3-bf90-37ec07c440da" containerName="extract-utilities" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.773206 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="e630863e-145f-46a3-bf90-37ec07c440da" containerName="extract-utilities" Jan 30 12:34:56 crc kubenswrapper[4703]: E0130 12:34:56.773229 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e630863e-145f-46a3-bf90-37ec07c440da" containerName="registry-server" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.773240 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="e630863e-145f-46a3-bf90-37ec07c440da" containerName="registry-server" Jan 30 12:34:56 crc kubenswrapper[4703]: E0130 12:34:56.773257 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e630863e-145f-46a3-bf90-37ec07c440da" containerName="extract-content" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.773265 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="e630863e-145f-46a3-bf90-37ec07c440da" containerName="extract-content" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.773485 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="e630863e-145f-46a3-bf90-37ec07c440da" containerName="registry-server" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.773502 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbcb8ade-a107-4435-8ee5-e27e4bb95998" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.775635 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-7x7vm" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.779033 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.779619 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.779907 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-jjdl5" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.780731 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.790225 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-7x7vm"] Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.981102 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxqzr\" (UniqueName: \"kubernetes.io/projected/ead7b76f-fd6a-41eb-9787-a9a1cb3b5820-kube-api-access-xxqzr\") pod \"ssh-known-hosts-edpm-deployment-7x7vm\" (UID: \"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820\") " pod="openstack/ssh-known-hosts-edpm-deployment-7x7vm" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.981846 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/ead7b76f-fd6a-41eb-9787-a9a1cb3b5820-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-7x7vm\" (UID: \"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820\") " pod="openstack/ssh-known-hosts-edpm-deployment-7x7vm" Jan 30 12:34:56 crc kubenswrapper[4703]: I0130 12:34:56.982116 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ead7b76f-fd6a-41eb-9787-a9a1cb3b5820-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-7x7vm\" (UID: \"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820\") " pod="openstack/ssh-known-hosts-edpm-deployment-7x7vm" Jan 30 12:34:57 crc kubenswrapper[4703]: I0130 12:34:57.083887 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxqzr\" (UniqueName: \"kubernetes.io/projected/ead7b76f-fd6a-41eb-9787-a9a1cb3b5820-kube-api-access-xxqzr\") pod \"ssh-known-hosts-edpm-deployment-7x7vm\" (UID: \"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820\") " pod="openstack/ssh-known-hosts-edpm-deployment-7x7vm" Jan 30 12:34:57 crc kubenswrapper[4703]: I0130 12:34:57.083982 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/ead7b76f-fd6a-41eb-9787-a9a1cb3b5820-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-7x7vm\" (UID: \"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820\") " pod="openstack/ssh-known-hosts-edpm-deployment-7x7vm" Jan 30 12:34:57 crc kubenswrapper[4703]: I0130 12:34:57.084026 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ead7b76f-fd6a-41eb-9787-a9a1cb3b5820-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-7x7vm\" (UID: \"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820\") " pod="openstack/ssh-known-hosts-edpm-deployment-7x7vm" Jan 30 12:34:57 crc 
kubenswrapper[4703]: I0130 12:34:57.090400 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ead7b76f-fd6a-41eb-9787-a9a1cb3b5820-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-7x7vm\" (UID: \"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820\") " pod="openstack/ssh-known-hosts-edpm-deployment-7x7vm" Jan 30 12:34:57 crc kubenswrapper[4703]: I0130 12:34:57.090731 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/ead7b76f-fd6a-41eb-9787-a9a1cb3b5820-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-7x7vm\" (UID: \"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820\") " pod="openstack/ssh-known-hosts-edpm-deployment-7x7vm" Jan 30 12:34:57 crc kubenswrapper[4703]: I0130 12:34:57.114804 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxqzr\" (UniqueName: \"kubernetes.io/projected/ead7b76f-fd6a-41eb-9787-a9a1cb3b5820-kube-api-access-xxqzr\") pod \"ssh-known-hosts-edpm-deployment-7x7vm\" (UID: \"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820\") " pod="openstack/ssh-known-hosts-edpm-deployment-7x7vm" Jan 30 12:34:57 crc kubenswrapper[4703]: I0130 12:34:57.402467 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-7x7vm" Jan 30 12:34:58 crc kubenswrapper[4703]: I0130 12:34:58.008733 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-7x7vm"] Jan 30 12:34:58 crc kubenswrapper[4703]: I0130 12:34:58.680052 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-7x7vm" event={"ID":"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820","Type":"ContainerStarted","Data":"6638de107871fbf3d67bf12249f2e016bd7a26c61c35ff6deeb19046b85b4f63"} Jan 30 12:34:59 crc kubenswrapper[4703]: I0130 12:34:59.695253 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-7x7vm" event={"ID":"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820","Type":"ContainerStarted","Data":"42648d6a5031c71b3d3559c433d5d2bd155006d0114b9198c84b2d76df176429"} Jan 30 12:34:59 crc kubenswrapper[4703]: I0130 12:34:59.726554 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-7x7vm" podStartSLOduration=3.245721895 podStartE2EDuration="3.726516896s" podCreationTimestamp="2026-01-30 12:34:56 +0000 UTC" firstStartedPulling="2026-01-30 12:34:58.018337502 +0000 UTC m=+2333.796159156" lastFinishedPulling="2026-01-30 12:34:58.499132513 +0000 UTC m=+2334.276954157" observedRunningTime="2026-01-30 12:34:59.72368789 +0000 UTC m=+2335.501509564" watchObservedRunningTime="2026-01-30 12:34:59.726516896 +0000 UTC m=+2335.504338540" Jan 30 12:35:03 crc kubenswrapper[4703]: I0130 12:35:03.086673 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834" Jan 30 12:35:03 crc kubenswrapper[4703]: E0130 12:35:03.088147 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:35:05 crc kubenswrapper[4703]: I0130 12:35:05.769914 4703 generic.go:334] "Generic 
(PLEG): container finished" podID="ead7b76f-fd6a-41eb-9787-a9a1cb3b5820" containerID="42648d6a5031c71b3d3559c433d5d2bd155006d0114b9198c84b2d76df176429" exitCode=0 Jan 30 12:35:05 crc kubenswrapper[4703]: I0130 12:35:05.770134 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-7x7vm" event={"ID":"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820","Type":"ContainerDied","Data":"42648d6a5031c71b3d3559c433d5d2bd155006d0114b9198c84b2d76df176429"} Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.213017 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-7x7vm" Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.361154 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/ead7b76f-fd6a-41eb-9787-a9a1cb3b5820-inventory-0\") pod \"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820\" (UID: \"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820\") " Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.361365 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxqzr\" (UniqueName: \"kubernetes.io/projected/ead7b76f-fd6a-41eb-9787-a9a1cb3b5820-kube-api-access-xxqzr\") pod \"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820\" (UID: \"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820\") " Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.361504 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ead7b76f-fd6a-41eb-9787-a9a1cb3b5820-ssh-key-openstack-edpm-ipam\") pod \"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820\" (UID: \"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820\") " Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.368716 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ead7b76f-fd6a-41eb-9787-a9a1cb3b5820-kube-api-access-xxqzr" (OuterVolumeSpecName: "kube-api-access-xxqzr") pod "ead7b76f-fd6a-41eb-9787-a9a1cb3b5820" (UID: "ead7b76f-fd6a-41eb-9787-a9a1cb3b5820"). InnerVolumeSpecName "kube-api-access-xxqzr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.394468 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ead7b76f-fd6a-41eb-9787-a9a1cb3b5820-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "ead7b76f-fd6a-41eb-9787-a9a1cb3b5820" (UID: "ead7b76f-fd6a-41eb-9787-a9a1cb3b5820"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.402001 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ead7b76f-fd6a-41eb-9787-a9a1cb3b5820-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "ead7b76f-fd6a-41eb-9787-a9a1cb3b5820" (UID: "ead7b76f-fd6a-41eb-9787-a9a1cb3b5820"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.465713 4703 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/ead7b76f-fd6a-41eb-9787-a9a1cb3b5820-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.465760 4703 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/ead7b76f-fd6a-41eb-9787-a9a1cb3b5820-inventory-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.465774 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxqzr\" (UniqueName: \"kubernetes.io/projected/ead7b76f-fd6a-41eb-9787-a9a1cb3b5820-kube-api-access-xxqzr\") on node \"crc\" DevicePath \"\"" Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.793904 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-7x7vm" event={"ID":"ead7b76f-fd6a-41eb-9787-a9a1cb3b5820","Type":"ContainerDied","Data":"6638de107871fbf3d67bf12249f2e016bd7a26c61c35ff6deeb19046b85b4f63"} Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.793961 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6638de107871fbf3d67bf12249f2e016bd7a26c61c35ff6deeb19046b85b4f63" Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.794013 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-7x7vm" Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.913686 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6"] Jan 30 12:35:07 crc kubenswrapper[4703]: E0130 12:35:07.914324 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ead7b76f-fd6a-41eb-9787-a9a1cb3b5820" containerName="ssh-known-hosts-edpm-deployment" Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.914344 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="ead7b76f-fd6a-41eb-9787-a9a1cb3b5820" containerName="ssh-known-hosts-edpm-deployment" Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.914651 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="ead7b76f-fd6a-41eb-9787-a9a1cb3b5820" containerName="ssh-known-hosts-edpm-deployment" Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.915923 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6" Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.919967 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.920042 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.920107 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-jjdl5" Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.921751 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 30 12:35:07 crc kubenswrapper[4703]: I0130 12:35:07.927515 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6"] Jan 30 12:35:08 crc kubenswrapper[4703]: I0130 12:35:08.080427 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gfckh\" (UniqueName: \"kubernetes.io/projected/cc1e24eb-d37d-4ce9-afaa-2af8105a976e-kube-api-access-gfckh\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-x9xc6\" (UID: \"cc1e24eb-d37d-4ce9-afaa-2af8105a976e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6" Jan 30 12:35:08 crc kubenswrapper[4703]: I0130 12:35:08.081200 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc1e24eb-d37d-4ce9-afaa-2af8105a976e-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-x9xc6\" (UID: \"cc1e24eb-d37d-4ce9-afaa-2af8105a976e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6" Jan 30 12:35:08 crc kubenswrapper[4703]: I0130 12:35:08.081299 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cc1e24eb-d37d-4ce9-afaa-2af8105a976e-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-x9xc6\" (UID: \"cc1e24eb-d37d-4ce9-afaa-2af8105a976e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6" Jan 30 12:35:08 crc kubenswrapper[4703]: I0130 12:35:08.183855 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gfckh\" (UniqueName: \"kubernetes.io/projected/cc1e24eb-d37d-4ce9-afaa-2af8105a976e-kube-api-access-gfckh\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-x9xc6\" (UID: \"cc1e24eb-d37d-4ce9-afaa-2af8105a976e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6" Jan 30 12:35:08 crc kubenswrapper[4703]: I0130 12:35:08.183997 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc1e24eb-d37d-4ce9-afaa-2af8105a976e-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-x9xc6\" (UID: \"cc1e24eb-d37d-4ce9-afaa-2af8105a976e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6" Jan 30 12:35:08 crc kubenswrapper[4703]: I0130 12:35:08.184081 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cc1e24eb-d37d-4ce9-afaa-2af8105a976e-ssh-key-openstack-edpm-ipam\") pod 
\"run-os-edpm-deployment-openstack-edpm-ipam-x9xc6\" (UID: \"cc1e24eb-d37d-4ce9-afaa-2af8105a976e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6" Jan 30 12:35:08 crc kubenswrapper[4703]: I0130 12:35:08.195691 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc1e24eb-d37d-4ce9-afaa-2af8105a976e-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-x9xc6\" (UID: \"cc1e24eb-d37d-4ce9-afaa-2af8105a976e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6" Jan 30 12:35:08 crc kubenswrapper[4703]: I0130 12:35:08.200165 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cc1e24eb-d37d-4ce9-afaa-2af8105a976e-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-x9xc6\" (UID: \"cc1e24eb-d37d-4ce9-afaa-2af8105a976e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6" Jan 30 12:35:08 crc kubenswrapper[4703]: I0130 12:35:08.209562 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gfckh\" (UniqueName: \"kubernetes.io/projected/cc1e24eb-d37d-4ce9-afaa-2af8105a976e-kube-api-access-gfckh\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-x9xc6\" (UID: \"cc1e24eb-d37d-4ce9-afaa-2af8105a976e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6" Jan 30 12:35:08 crc kubenswrapper[4703]: I0130 12:35:08.250217 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6" Jan 30 12:35:08 crc kubenswrapper[4703]: I0130 12:35:08.788563 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6"] Jan 30 12:35:08 crc kubenswrapper[4703]: I0130 12:35:08.807464 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6" event={"ID":"cc1e24eb-d37d-4ce9-afaa-2af8105a976e","Type":"ContainerStarted","Data":"90f799b7c2f31a55fc29b798abf2bb7c921899a853b40ca783aeaf5a08eeb83a"} Jan 30 12:35:10 crc kubenswrapper[4703]: I0130 12:35:10.828053 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6" event={"ID":"cc1e24eb-d37d-4ce9-afaa-2af8105a976e","Type":"ContainerStarted","Data":"5274123f2b0fb8b9ce346ff9cf2e074078795b09281a9936dac991af9eb6212e"} Jan 30 12:35:10 crc kubenswrapper[4703]: I0130 12:35:10.853028 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6" podStartSLOduration=2.817602418 podStartE2EDuration="3.85299941s" podCreationTimestamp="2026-01-30 12:35:07 +0000 UTC" firstStartedPulling="2026-01-30 12:35:08.797656871 +0000 UTC m=+2344.575478525" lastFinishedPulling="2026-01-30 12:35:09.833053863 +0000 UTC m=+2345.610875517" observedRunningTime="2026-01-30 12:35:10.849749043 +0000 UTC m=+2346.627570697" watchObservedRunningTime="2026-01-30 12:35:10.85299941 +0000 UTC m=+2346.630821064" Jan 30 12:35:17 crc kubenswrapper[4703]: I0130 12:35:17.087011 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834" Jan 30 12:35:17 crc kubenswrapper[4703]: E0130 12:35:17.089797 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:35:19 crc kubenswrapper[4703]: I0130 12:35:19.930386 4703 generic.go:334] "Generic (PLEG): container finished" podID="cc1e24eb-d37d-4ce9-afaa-2af8105a976e" containerID="5274123f2b0fb8b9ce346ff9cf2e074078795b09281a9936dac991af9eb6212e" exitCode=0 Jan 30 12:35:19 crc kubenswrapper[4703]: I0130 12:35:19.930504 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6" event={"ID":"cc1e24eb-d37d-4ce9-afaa-2af8105a976e","Type":"ContainerDied","Data":"5274123f2b0fb8b9ce346ff9cf2e074078795b09281a9936dac991af9eb6212e"} Jan 30 12:35:21 crc kubenswrapper[4703]: I0130 12:35:21.405101 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6" Jan 30 12:35:21 crc kubenswrapper[4703]: I0130 12:35:21.538600 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc1e24eb-d37d-4ce9-afaa-2af8105a976e-inventory\") pod \"cc1e24eb-d37d-4ce9-afaa-2af8105a976e\" (UID: \"cc1e24eb-d37d-4ce9-afaa-2af8105a976e\") " Jan 30 12:35:21 crc kubenswrapper[4703]: I0130 12:35:21.538864 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cc1e24eb-d37d-4ce9-afaa-2af8105a976e-ssh-key-openstack-edpm-ipam\") pod \"cc1e24eb-d37d-4ce9-afaa-2af8105a976e\" (UID: \"cc1e24eb-d37d-4ce9-afaa-2af8105a976e\") " Jan 30 12:35:21 crc kubenswrapper[4703]: I0130 12:35:21.539036 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gfckh\" (UniqueName: \"kubernetes.io/projected/cc1e24eb-d37d-4ce9-afaa-2af8105a976e-kube-api-access-gfckh\") pod \"cc1e24eb-d37d-4ce9-afaa-2af8105a976e\" (UID: \"cc1e24eb-d37d-4ce9-afaa-2af8105a976e\") " Jan 30 12:35:21 crc kubenswrapper[4703]: I0130 12:35:21.546952 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc1e24eb-d37d-4ce9-afaa-2af8105a976e-kube-api-access-gfckh" (OuterVolumeSpecName: "kube-api-access-gfckh") pod "cc1e24eb-d37d-4ce9-afaa-2af8105a976e" (UID: "cc1e24eb-d37d-4ce9-afaa-2af8105a976e"). InnerVolumeSpecName "kube-api-access-gfckh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:35:21 crc kubenswrapper[4703]: I0130 12:35:21.571248 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc1e24eb-d37d-4ce9-afaa-2af8105a976e-inventory" (OuterVolumeSpecName: "inventory") pod "cc1e24eb-d37d-4ce9-afaa-2af8105a976e" (UID: "cc1e24eb-d37d-4ce9-afaa-2af8105a976e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:35:21 crc kubenswrapper[4703]: I0130 12:35:21.573952 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc1e24eb-d37d-4ce9-afaa-2af8105a976e-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "cc1e24eb-d37d-4ce9-afaa-2af8105a976e" (UID: "cc1e24eb-d37d-4ce9-afaa-2af8105a976e"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:35:21 crc kubenswrapper[4703]: I0130 12:35:21.641460 4703 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc1e24eb-d37d-4ce9-afaa-2af8105a976e-inventory\") on node \"crc\" DevicePath \"\"" Jan 30 12:35:21 crc kubenswrapper[4703]: I0130 12:35:21.641810 4703 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/cc1e24eb-d37d-4ce9-afaa-2af8105a976e-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 30 12:35:21 crc kubenswrapper[4703]: I0130 12:35:21.641822 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gfckh\" (UniqueName: \"kubernetes.io/projected/cc1e24eb-d37d-4ce9-afaa-2af8105a976e-kube-api-access-gfckh\") on node \"crc\" DevicePath \"\"" Jan 30 12:35:21 crc kubenswrapper[4703]: I0130 12:35:21.955443 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6" event={"ID":"cc1e24eb-d37d-4ce9-afaa-2af8105a976e","Type":"ContainerDied","Data":"90f799b7c2f31a55fc29b798abf2bb7c921899a853b40ca783aeaf5a08eeb83a"} Jan 30 12:35:21 crc kubenswrapper[4703]: I0130 12:35:21.955507 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="90f799b7c2f31a55fc29b798abf2bb7c921899a853b40ca783aeaf5a08eeb83a" Jan 30 12:35:21 crc kubenswrapper[4703]: I0130 12:35:21.955516 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-x9xc6" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.047990 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9"] Jan 30 12:35:22 crc kubenswrapper[4703]: E0130 12:35:22.048678 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc1e24eb-d37d-4ce9-afaa-2af8105a976e" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.048709 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc1e24eb-d37d-4ce9-afaa-2af8105a976e" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.049013 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc1e24eb-d37d-4ce9-afaa-2af8105a976e" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.050096 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.064602 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.064644 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-jjdl5" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.064656 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.065044 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.071557 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9"] Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.153634 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2b4c4fdd-2e01-44c0-b655-364b653d45ae-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9\" (UID: \"2b4c4fdd-2e01-44c0-b655-364b653d45ae\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.154182 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vnxd\" (UniqueName: \"kubernetes.io/projected/2b4c4fdd-2e01-44c0-b655-364b653d45ae-kube-api-access-2vnxd\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9\" (UID: \"2b4c4fdd-2e01-44c0-b655-364b653d45ae\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.154734 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2b4c4fdd-2e01-44c0-b655-364b653d45ae-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9\" (UID: \"2b4c4fdd-2e01-44c0-b655-364b653d45ae\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.257602 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2b4c4fdd-2e01-44c0-b655-364b653d45ae-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9\" (UID: \"2b4c4fdd-2e01-44c0-b655-364b653d45ae\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.258273 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2b4c4fdd-2e01-44c0-b655-364b653d45ae-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9\" (UID: \"2b4c4fdd-2e01-44c0-b655-364b653d45ae\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.258319 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vnxd\" (UniqueName: \"kubernetes.io/projected/2b4c4fdd-2e01-44c0-b655-364b653d45ae-kube-api-access-2vnxd\") pod 
\"reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9\" (UID: \"2b4c4fdd-2e01-44c0-b655-364b653d45ae\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.265934 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2b4c4fdd-2e01-44c0-b655-364b653d45ae-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9\" (UID: \"2b4c4fdd-2e01-44c0-b655-364b653d45ae\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.266384 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2b4c4fdd-2e01-44c0-b655-364b653d45ae-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9\" (UID: \"2b4c4fdd-2e01-44c0-b655-364b653d45ae\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.277950 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vnxd\" (UniqueName: \"kubernetes.io/projected/2b4c4fdd-2e01-44c0-b655-364b653d45ae-kube-api-access-2vnxd\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9\" (UID: \"2b4c4fdd-2e01-44c0-b655-364b653d45ae\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.386529 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9" Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.925711 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9"] Jan 30 12:35:22 crc kubenswrapper[4703]: I0130 12:35:22.967536 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9" event={"ID":"2b4c4fdd-2e01-44c0-b655-364b653d45ae","Type":"ContainerStarted","Data":"c5c03f58f56a7e5fdbf83f2fc028d693e820be5b0f4508fbb7363d202c4237e4"} Jan 30 12:35:24 crc kubenswrapper[4703]: I0130 12:35:24.993273 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9" event={"ID":"2b4c4fdd-2e01-44c0-b655-364b653d45ae","Type":"ContainerStarted","Data":"8f519711f576fe0fba1c6c36a3352e08bc03ec4cee6227222acb4c6bc0779bc4"} Jan 30 12:35:25 crc kubenswrapper[4703]: I0130 12:35:25.020233 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9" podStartSLOduration=1.993871681 podStartE2EDuration="3.02020185s" podCreationTimestamp="2026-01-30 12:35:22 +0000 UTC" firstStartedPulling="2026-01-30 12:35:22.935628328 +0000 UTC m=+2358.713449982" lastFinishedPulling="2026-01-30 12:35:23.961958457 +0000 UTC m=+2359.739780151" observedRunningTime="2026-01-30 12:35:25.01384257 +0000 UTC m=+2360.791664234" watchObservedRunningTime="2026-01-30 12:35:25.02020185 +0000 UTC m=+2360.798023504" Jan 30 12:35:28 crc kubenswrapper[4703]: I0130 12:35:28.087013 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834" Jan 30 12:35:28 crc kubenswrapper[4703]: E0130 12:35:28.088750 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:35:34 crc kubenswrapper[4703]: I0130 12:35:34.092476 4703 generic.go:334] "Generic (PLEG): container finished" podID="2b4c4fdd-2e01-44c0-b655-364b653d45ae" containerID="8f519711f576fe0fba1c6c36a3352e08bc03ec4cee6227222acb4c6bc0779bc4" exitCode=0 Jan 30 12:35:34 crc kubenswrapper[4703]: I0130 12:35:34.092528 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9" event={"ID":"2b4c4fdd-2e01-44c0-b655-364b653d45ae","Type":"ContainerDied","Data":"8f519711f576fe0fba1c6c36a3352e08bc03ec4cee6227222acb4c6bc0779bc4"} Jan 30 12:35:35 crc kubenswrapper[4703]: I0130 12:35:35.552495 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9" Jan 30 12:35:35 crc kubenswrapper[4703]: I0130 12:35:35.708260 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vnxd\" (UniqueName: \"kubernetes.io/projected/2b4c4fdd-2e01-44c0-b655-364b653d45ae-kube-api-access-2vnxd\") pod \"2b4c4fdd-2e01-44c0-b655-364b653d45ae\" (UID: \"2b4c4fdd-2e01-44c0-b655-364b653d45ae\") " Jan 30 12:35:35 crc kubenswrapper[4703]: I0130 12:35:35.708695 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2b4c4fdd-2e01-44c0-b655-364b653d45ae-inventory\") pod \"2b4c4fdd-2e01-44c0-b655-364b653d45ae\" (UID: \"2b4c4fdd-2e01-44c0-b655-364b653d45ae\") " Jan 30 12:35:35 crc kubenswrapper[4703]: I0130 12:35:35.708816 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2b4c4fdd-2e01-44c0-b655-364b653d45ae-ssh-key-openstack-edpm-ipam\") pod \"2b4c4fdd-2e01-44c0-b655-364b653d45ae\" (UID: \"2b4c4fdd-2e01-44c0-b655-364b653d45ae\") " Jan 30 12:35:35 crc kubenswrapper[4703]: I0130 12:35:35.716581 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b4c4fdd-2e01-44c0-b655-364b653d45ae-kube-api-access-2vnxd" (OuterVolumeSpecName: "kube-api-access-2vnxd") pod "2b4c4fdd-2e01-44c0-b655-364b653d45ae" (UID: "2b4c4fdd-2e01-44c0-b655-364b653d45ae"). InnerVolumeSpecName "kube-api-access-2vnxd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:35:35 crc kubenswrapper[4703]: I0130 12:35:35.742987 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b4c4fdd-2e01-44c0-b655-364b653d45ae-inventory" (OuterVolumeSpecName: "inventory") pod "2b4c4fdd-2e01-44c0-b655-364b653d45ae" (UID: "2b4c4fdd-2e01-44c0-b655-364b653d45ae"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:35:35 crc kubenswrapper[4703]: I0130 12:35:35.753831 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b4c4fdd-2e01-44c0-b655-364b653d45ae-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "2b4c4fdd-2e01-44c0-b655-364b653d45ae" (UID: "2b4c4fdd-2e01-44c0-b655-364b653d45ae"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:35:35 crc kubenswrapper[4703]: I0130 12:35:35.812836 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vnxd\" (UniqueName: \"kubernetes.io/projected/2b4c4fdd-2e01-44c0-b655-364b653d45ae-kube-api-access-2vnxd\") on node \"crc\" DevicePath \"\"" Jan 30 12:35:35 crc kubenswrapper[4703]: I0130 12:35:35.812885 4703 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2b4c4fdd-2e01-44c0-b655-364b653d45ae-inventory\") on node \"crc\" DevicePath \"\"" Jan 30 12:35:35 crc kubenswrapper[4703]: I0130 12:35:35.812896 4703 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/2b4c4fdd-2e01-44c0-b655-364b653d45ae-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.115419 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9" event={"ID":"2b4c4fdd-2e01-44c0-b655-364b653d45ae","Type":"ContainerDied","Data":"c5c03f58f56a7e5fdbf83f2fc028d693e820be5b0f4508fbb7363d202c4237e4"} Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.116010 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c5c03f58f56a7e5fdbf83f2fc028d693e820be5b0f4508fbb7363d202c4237e4" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.115476 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.304407 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq"] Jan 30 12:35:36 crc kubenswrapper[4703]: E0130 12:35:36.305230 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b4c4fdd-2e01-44c0-b655-364b653d45ae" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.305309 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b4c4fdd-2e01-44c0-b655-364b653d45ae" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.305606 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b4c4fdd-2e01-44c0-b655-364b653d45ae" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.306496 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.311017 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.311936 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.312238 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.312469 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-jjdl5" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.313354 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.313529 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.349633 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq"] Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.353606 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.353618 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.436658 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.437169 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.437334 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.437466 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.437558 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.437679 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.437762 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.437913 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.438026 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.438237 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.438346 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w45qh\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-kube-api-access-w45qh\") 
pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.438467 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.438589 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.438693 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.541022 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.541866 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.541990 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w45qh\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-kube-api-access-w45qh\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.542188 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc 
kubenswrapper[4703]: I0130 12:35:36.542384 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.542522 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.542728 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.542859 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.543051 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.543267 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.543386 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.543520 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.543627 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.543874 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.554606 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.603782 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.604967 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.605842 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.618050 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.619207 4703 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.619510 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.620111 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.621314 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.621548 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.621346 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.629380 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w45qh\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-kube-api-access-w45qh\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.640273 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: 
\"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.641086 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:36 crc kubenswrapper[4703]: I0130 12:35:36.932418 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:35:37 crc kubenswrapper[4703]: I0130 12:35:37.392566 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq"] Jan 30 12:35:38 crc kubenswrapper[4703]: I0130 12:35:38.140176 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" event={"ID":"9d2e464d-58a6-45d6-bc48-4bf88f246501","Type":"ContainerStarted","Data":"6e2b60943489ed1dd77eed4f8de9b8f8c0891552b8c3c290b74d05358a658bc0"} Jan 30 12:35:38 crc kubenswrapper[4703]: I0130 12:35:38.140910 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" event={"ID":"9d2e464d-58a6-45d6-bc48-4bf88f246501","Type":"ContainerStarted","Data":"aaf2cce3ec65e7ec0ab921c3cb6afbe544e4d6f7dc526d8bebca7277c648f355"} Jan 30 12:35:38 crc kubenswrapper[4703]: I0130 12:35:38.170872 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" podStartSLOduration=1.748530085 podStartE2EDuration="2.170839867s" podCreationTimestamp="2026-01-30 12:35:36 +0000 UTC" firstStartedPulling="2026-01-30 12:35:37.39317213 +0000 UTC m=+2373.170993784" lastFinishedPulling="2026-01-30 12:35:37.815481912 +0000 UTC m=+2373.593303566" observedRunningTime="2026-01-30 12:35:38.166797988 +0000 UTC m=+2373.944619652" watchObservedRunningTime="2026-01-30 12:35:38.170839867 +0000 UTC m=+2373.948661521" Jan 30 12:35:40 crc kubenswrapper[4703]: I0130 12:35:40.086617 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834" Jan 30 12:35:40 crc kubenswrapper[4703]: E0130 12:35:40.088293 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:35:42 crc kubenswrapper[4703]: I0130 12:35:42.823503 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:35:42 crc kubenswrapper[4703]: I0130 12:35:42.824107 4703 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:35:52 crc kubenswrapper[4703]: I0130 12:35:52.086356 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834" Jan 30 12:35:53 crc kubenswrapper[4703]: I0130 12:35:53.313258 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerStarted","Data":"82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739"} Jan 30 12:35:56 crc kubenswrapper[4703]: I0130 12:35:56.034700 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 30 12:35:56 crc kubenswrapper[4703]: I0130 12:35:56.035067 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:35:56 crc kubenswrapper[4703]: E0130 12:35:56.035850 4703 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739 is running failed: container process not found" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 30 12:35:56 crc kubenswrapper[4703]: E0130 12:35:56.036771 4703 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739 is running failed: container process not found" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 30 12:35:56 crc kubenswrapper[4703]: E0130 12:35:56.037617 4703 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739 is running failed: container process not found" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 30 12:35:56 crc kubenswrapper[4703]: E0130 12:35:56.037704 4703 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739 is running failed: container process not found" probeType="Startup" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" containerName="nova-scheduler-scheduler" Jan 30 12:35:56 crc kubenswrapper[4703]: I0130 12:35:56.346351 4703 generic.go:334] "Generic (PLEG): container finished" podID="2fc19a6b-3cde-4bb5-9499-f5be846289da" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739" exitCode=1 Jan 30 12:35:56 crc kubenswrapper[4703]: I0130 12:35:56.346425 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerDied","Data":"82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739"} Jan 30 12:35:56 crc kubenswrapper[4703]: I0130 
12:35:56.346495 4703 scope.go:117] "RemoveContainer" containerID="1788715ed8e1b8ad00a0e3cfa4e517f77400bdcc193d5b7ce3dd2385cf0cb834" Jan 30 12:35:56 crc kubenswrapper[4703]: I0130 12:35:56.348648 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739" Jan 30 12:35:56 crc kubenswrapper[4703]: E0130 12:35:56.349174 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:36:06 crc kubenswrapper[4703]: I0130 12:36:06.035092 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:36:06 crc kubenswrapper[4703]: I0130 12:36:06.036183 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:36:06 crc kubenswrapper[4703]: I0130 12:36:06.037662 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739" Jan 30 12:36:06 crc kubenswrapper[4703]: E0130 12:36:06.038210 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:36:12 crc kubenswrapper[4703]: I0130 12:36:12.822791 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:36:12 crc kubenswrapper[4703]: I0130 12:36:12.823861 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:36:15 crc kubenswrapper[4703]: I0130 12:36:15.552965 4703 generic.go:334] "Generic (PLEG): container finished" podID="9d2e464d-58a6-45d6-bc48-4bf88f246501" containerID="6e2b60943489ed1dd77eed4f8de9b8f8c0891552b8c3c290b74d05358a658bc0" exitCode=0 Jan 30 12:36:15 crc kubenswrapper[4703]: I0130 12:36:15.553071 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" event={"ID":"9d2e464d-58a6-45d6-bc48-4bf88f246501","Type":"ContainerDied","Data":"6e2b60943489ed1dd77eed4f8de9b8f8c0891552b8c3c290b74d05358a658bc0"} Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.031494 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.230424 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-repo-setup-combined-ca-bundle\") pod \"9d2e464d-58a6-45d6-bc48-4bf88f246501\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.230961 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-telemetry-combined-ca-bundle\") pod \"9d2e464d-58a6-45d6-bc48-4bf88f246501\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.231182 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-ssh-key-openstack-edpm-ipam\") pod \"9d2e464d-58a6-45d6-bc48-4bf88f246501\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.231371 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-inventory\") pod \"9d2e464d-58a6-45d6-bc48-4bf88f246501\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.231551 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"9d2e464d-58a6-45d6-bc48-4bf88f246501\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.231700 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-ovn-default-certs-0\") pod \"9d2e464d-58a6-45d6-bc48-4bf88f246501\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.231805 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-libvirt-combined-ca-bundle\") pod \"9d2e464d-58a6-45d6-bc48-4bf88f246501\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.231967 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-neutron-metadata-combined-ca-bundle\") pod \"9d2e464d-58a6-45d6-bc48-4bf88f246501\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.232149 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"9d2e464d-58a6-45d6-bc48-4bf88f246501\" (UID: 
\"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.232283 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-ovn-combined-ca-bundle\") pod \"9d2e464d-58a6-45d6-bc48-4bf88f246501\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.232419 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"9d2e464d-58a6-45d6-bc48-4bf88f246501\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.256348 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "9d2e464d-58a6-45d6-bc48-4bf88f246501" (UID: "9d2e464d-58a6-45d6-bc48-4bf88f246501"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.256580 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "9d2e464d-58a6-45d6-bc48-4bf88f246501" (UID: "9d2e464d-58a6-45d6-bc48-4bf88f246501"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.256848 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "9d2e464d-58a6-45d6-bc48-4bf88f246501" (UID: "9d2e464d-58a6-45d6-bc48-4bf88f246501"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.232529 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-nova-combined-ca-bundle\") pod \"9d2e464d-58a6-45d6-bc48-4bf88f246501\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.267094 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "9d2e464d-58a6-45d6-bc48-4bf88f246501" (UID: "9d2e464d-58a6-45d6-bc48-4bf88f246501"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.267560 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w45qh\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-kube-api-access-w45qh\") pod \"9d2e464d-58a6-45d6-bc48-4bf88f246501\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.267712 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-bootstrap-combined-ca-bundle\") pod \"9d2e464d-58a6-45d6-bc48-4bf88f246501\" (UID: \"9d2e464d-58a6-45d6-bc48-4bf88f246501\") " Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.268433 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-inventory" (OuterVolumeSpecName: "inventory") pod "9d2e464d-58a6-45d6-bc48-4bf88f246501" (UID: "9d2e464d-58a6-45d6-bc48-4bf88f246501"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.268373 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "9d2e464d-58a6-45d6-bc48-4bf88f246501" (UID: "9d2e464d-58a6-45d6-bc48-4bf88f246501"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.269364 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "9d2e464d-58a6-45d6-bc48-4bf88f246501" (UID: "9d2e464d-58a6-45d6-bc48-4bf88f246501"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.275359 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "9d2e464d-58a6-45d6-bc48-4bf88f246501" (UID: "9d2e464d-58a6-45d6-bc48-4bf88f246501"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.275435 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-kube-api-access-w45qh" (OuterVolumeSpecName: "kube-api-access-w45qh") pod "9d2e464d-58a6-45d6-bc48-4bf88f246501" (UID: "9d2e464d-58a6-45d6-bc48-4bf88f246501"). InnerVolumeSpecName "kube-api-access-w45qh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.276043 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "9d2e464d-58a6-45d6-bc48-4bf88f246501" (UID: "9d2e464d-58a6-45d6-bc48-4bf88f246501"). 
InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.280575 4703 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-inventory\") on node \"crc\" DevicePath \"\"" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.280605 4703 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.280638 4703 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.280662 4703 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.280677 4703 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.280690 4703 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.280727 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w45qh\" (UniqueName: \"kubernetes.io/projected/9d2e464d-58a6-45d6-bc48-4bf88f246501-kube-api-access-w45qh\") on node \"crc\" DevicePath \"\"" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.280739 4703 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.280750 4703 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.280761 4703 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.284481 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "9d2e464d-58a6-45d6-bc48-4bf88f246501" (UID: "9d2e464d-58a6-45d6-bc48-4bf88f246501"). 
InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.288546 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "9d2e464d-58a6-45d6-bc48-4bf88f246501" (UID: "9d2e464d-58a6-45d6-bc48-4bf88f246501"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.293359 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "9d2e464d-58a6-45d6-bc48-4bf88f246501" (UID: "9d2e464d-58a6-45d6-bc48-4bf88f246501"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.305359 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "9d2e464d-58a6-45d6-bc48-4bf88f246501" (UID: "9d2e464d-58a6-45d6-bc48-4bf88f246501"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.384092 4703 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.384156 4703 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.384174 4703 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.384191 4703 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d2e464d-58a6-45d6-bc48-4bf88f246501-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.576809 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" event={"ID":"9d2e464d-58a6-45d6-bc48-4bf88f246501","Type":"ContainerDied","Data":"aaf2cce3ec65e7ec0ab921c3cb6afbe544e4d6f7dc526d8bebca7277c648f355"} Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.576895 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aaf2cce3ec65e7ec0ab921c3cb6afbe544e4d6f7dc526d8bebca7277c648f355" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.577021 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.698201 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2"] Jan 30 12:36:17 crc kubenswrapper[4703]: E0130 12:36:17.698812 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d2e464d-58a6-45d6-bc48-4bf88f246501" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.698842 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d2e464d-58a6-45d6-bc48-4bf88f246501" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.699163 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d2e464d-58a6-45d6-bc48-4bf88f246501" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.700290 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.710676 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2"] Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.718415 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-jjdl5" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.718685 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.718995 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.718702 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.719230 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.793617 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/28b7f396-866b-4dc5-9ed5-d45a94da5890-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dvwp2\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.793706 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-778rg\" (UniqueName: \"kubernetes.io/projected/28b7f396-866b-4dc5-9ed5-d45a94da5890-kube-api-access-778rg\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dvwp2\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.793776 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dvwp2\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.793862 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dvwp2\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.793896 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dvwp2\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.895961 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dvwp2\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.896061 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dvwp2\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.896187 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/28b7f396-866b-4dc5-9ed5-d45a94da5890-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dvwp2\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.896271 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-778rg\" (UniqueName: \"kubernetes.io/projected/28b7f396-866b-4dc5-9ed5-d45a94da5890-kube-api-access-778rg\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dvwp2\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.896394 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dvwp2\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.898970 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/28b7f396-866b-4dc5-9ed5-d45a94da5890-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dvwp2\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.903425 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dvwp2\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.903622 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dvwp2\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.906651 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dvwp2\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:36:17 crc kubenswrapper[4703]: I0130 12:36:17.921013 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-778rg\" (UniqueName: \"kubernetes.io/projected/28b7f396-866b-4dc5-9ed5-d45a94da5890-kube-api-access-778rg\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-dvwp2\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:36:18 crc kubenswrapper[4703]: I0130 12:36:18.019291 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:36:18 crc kubenswrapper[4703]: I0130 12:36:18.087722 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739" Jan 30 12:36:18 crc kubenswrapper[4703]: E0130 12:36:18.088043 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:36:18 crc kubenswrapper[4703]: I0130 12:36:18.597142 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2"] Jan 30 12:36:18 crc kubenswrapper[4703]: W0130 12:36:18.604521 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28b7f396_866b_4dc5_9ed5_d45a94da5890.slice/crio-e3c98bf3d6fb18291f75d2729cda285f7c3c7683c0e684d0c0bfcbcf20d721e1 WatchSource:0}: Error finding container e3c98bf3d6fb18291f75d2729cda285f7c3c7683c0e684d0c0bfcbcf20d721e1: Status 404 returned error can't find the container with id e3c98bf3d6fb18291f75d2729cda285f7c3c7683c0e684d0c0bfcbcf20d721e1 Jan 30 12:36:19 crc kubenswrapper[4703]: I0130 12:36:19.605371 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" event={"ID":"28b7f396-866b-4dc5-9ed5-d45a94da5890","Type":"ContainerStarted","Data":"1c5cf1cb3c032e75dfe52b2e6e45c3cd60a93ae64f4528f87f3483523a172660"} Jan 30 12:36:19 crc kubenswrapper[4703]: I0130 12:36:19.605455 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" event={"ID":"28b7f396-866b-4dc5-9ed5-d45a94da5890","Type":"ContainerStarted","Data":"e3c98bf3d6fb18291f75d2729cda285f7c3c7683c0e684d0c0bfcbcf20d721e1"} Jan 30 12:36:19 crc kubenswrapper[4703]: I0130 12:36:19.636811 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" podStartSLOduration=2.16447346 podStartE2EDuration="2.636769882s" podCreationTimestamp="2026-01-30 12:36:17 +0000 UTC" firstStartedPulling="2026-01-30 12:36:18.60839842 +0000 UTC m=+2414.386220074" lastFinishedPulling="2026-01-30 12:36:19.080694842 +0000 UTC m=+2414.858516496" observedRunningTime="2026-01-30 12:36:19.629619991 +0000 UTC m=+2415.407441665" watchObservedRunningTime="2026-01-30 12:36:19.636769882 +0000 UTC m=+2415.414591536" Jan 30 12:36:30 crc kubenswrapper[4703]: I0130 12:36:30.087524 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739" Jan 30 12:36:30 crc kubenswrapper[4703]: E0130 12:36:30.104815 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:36:42 crc kubenswrapper[4703]: I0130 12:36:42.823159 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe 
status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:36:42 crc kubenswrapper[4703]: I0130 12:36:42.823805 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:36:42 crc kubenswrapper[4703]: I0130 12:36:42.823870 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 12:36:42 crc kubenswrapper[4703]: I0130 12:36:42.824978 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"} pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 12:36:42 crc kubenswrapper[4703]: I0130 12:36:42.825058 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" containerID="cri-o://23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7" gracePeriod=600 Jan 30 12:36:42 crc kubenswrapper[4703]: E0130 12:36:42.959156 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:36:43 crc kubenswrapper[4703]: I0130 12:36:43.904632 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerDied","Data":"23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"} Jan 30 12:36:43 crc kubenswrapper[4703]: I0130 12:36:43.904610 4703 generic.go:334] "Generic (PLEG): container finished" podID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7" exitCode=0 Jan 30 12:36:43 crc kubenswrapper[4703]: I0130 12:36:43.905255 4703 scope.go:117] "RemoveContainer" containerID="5fa5c313f1cbb35a96c31064d256324b86779414c02a6d827c9dc4a8b556105a" Jan 30 12:36:43 crc kubenswrapper[4703]: I0130 12:36:43.906053 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7" Jan 30 12:36:43 crc kubenswrapper[4703]: E0130 12:36:43.906534 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:36:45 crc kubenswrapper[4703]: I0130 12:36:45.094902 
4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739" Jan 30 12:36:45 crc kubenswrapper[4703]: E0130 12:36:45.095292 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:36:55 crc kubenswrapper[4703]: I0130 12:36:55.094833 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7" Jan 30 12:36:55 crc kubenswrapper[4703]: E0130 12:36:55.095915 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:36:58 crc kubenswrapper[4703]: I0130 12:36:58.086732 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739" Jan 30 12:36:58 crc kubenswrapper[4703]: E0130 12:36:58.087521 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:37:10 crc kubenswrapper[4703]: I0130 12:37:10.087536 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739" Jan 30 12:37:10 crc kubenswrapper[4703]: I0130 12:37:10.088658 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7" Jan 30 12:37:10 crc kubenswrapper[4703]: E0130 12:37:10.088771 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:37:10 crc kubenswrapper[4703]: E0130 12:37:10.089016 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:37:22 crc kubenswrapper[4703]: I0130 12:37:22.086769 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7" Jan 30 12:37:22 crc kubenswrapper[4703]: E0130 12:37:22.087917 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:37:22 crc kubenswrapper[4703]: I0130 12:37:22.332478 4703 generic.go:334] "Generic (PLEG): container finished" podID="28b7f396-866b-4dc5-9ed5-d45a94da5890" containerID="1c5cf1cb3c032e75dfe52b2e6e45c3cd60a93ae64f4528f87f3483523a172660" exitCode=0 Jan 30 12:37:22 crc kubenswrapper[4703]: I0130 12:37:22.332533 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" event={"ID":"28b7f396-866b-4dc5-9ed5-d45a94da5890","Type":"ContainerDied","Data":"1c5cf1cb3c032e75dfe52b2e6e45c3cd60a93ae64f4528f87f3483523a172660"} Jan 30 12:37:23 crc kubenswrapper[4703]: I0130 12:37:23.882711 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:37:23 crc kubenswrapper[4703]: I0130 12:37:23.979683 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-inventory\") pod \"28b7f396-866b-4dc5-9ed5-d45a94da5890\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " Jan 30 12:37:23 crc kubenswrapper[4703]: I0130 12:37:23.979903 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-778rg\" (UniqueName: \"kubernetes.io/projected/28b7f396-866b-4dc5-9ed5-d45a94da5890-kube-api-access-778rg\") pod \"28b7f396-866b-4dc5-9ed5-d45a94da5890\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " Jan 30 12:37:23 crc kubenswrapper[4703]: I0130 12:37:23.979965 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-ovn-combined-ca-bundle\") pod \"28b7f396-866b-4dc5-9ed5-d45a94da5890\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " Jan 30 12:37:23 crc kubenswrapper[4703]: I0130 12:37:23.979995 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-ssh-key-openstack-edpm-ipam\") pod \"28b7f396-866b-4dc5-9ed5-d45a94da5890\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " Jan 30 12:37:23 crc kubenswrapper[4703]: I0130 12:37:23.980207 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/28b7f396-866b-4dc5-9ed5-d45a94da5890-ovncontroller-config-0\") pod \"28b7f396-866b-4dc5-9ed5-d45a94da5890\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " Jan 30 12:37:23 crc kubenswrapper[4703]: I0130 12:37:23.988227 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28b7f396-866b-4dc5-9ed5-d45a94da5890-kube-api-access-778rg" (OuterVolumeSpecName: "kube-api-access-778rg") pod "28b7f396-866b-4dc5-9ed5-d45a94da5890" (UID: "28b7f396-866b-4dc5-9ed5-d45a94da5890"). InnerVolumeSpecName "kube-api-access-778rg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.016392 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "28b7f396-866b-4dc5-9ed5-d45a94da5890" (UID: "28b7f396-866b-4dc5-9ed5-d45a94da5890"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.019293 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-inventory" (OuterVolumeSpecName: "inventory") pod "28b7f396-866b-4dc5-9ed5-d45a94da5890" (UID: "28b7f396-866b-4dc5-9ed5-d45a94da5890"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:37:24 crc kubenswrapper[4703]: E0130 12:37:24.023167 4703 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-ssh-key-openstack-edpm-ipam podName:28b7f396-866b-4dc5-9ed5-d45a94da5890 nodeName:}" failed. No retries permitted until 2026-01-30 12:37:24.523095954 +0000 UTC m=+2480.300917608 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "ssh-key-openstack-edpm-ipam" (UniqueName: "kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-ssh-key-openstack-edpm-ipam") pod "28b7f396-866b-4dc5-9ed5-d45a94da5890" (UID: "28b7f396-866b-4dc5-9ed5-d45a94da5890") : error deleting /var/lib/kubelet/pods/28b7f396-866b-4dc5-9ed5-d45a94da5890/volume-subpaths: remove /var/lib/kubelet/pods/28b7f396-866b-4dc5-9ed5-d45a94da5890/volume-subpaths: no such file or directory Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.023462 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28b7f396-866b-4dc5-9ed5-d45a94da5890-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "28b7f396-866b-4dc5-9ed5-d45a94da5890" (UID: "28b7f396-866b-4dc5-9ed5-d45a94da5890"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.083324 4703 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-inventory\") on node \"crc\" DevicePath \"\"" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.083390 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-778rg\" (UniqueName: \"kubernetes.io/projected/28b7f396-866b-4dc5-9ed5-d45a94da5890-kube-api-access-778rg\") on node \"crc\" DevicePath \"\"" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.083402 4703 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.083412 4703 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/28b7f396-866b-4dc5-9ed5-d45a94da5890-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.355500 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" event={"ID":"28b7f396-866b-4dc5-9ed5-d45a94da5890","Type":"ContainerDied","Data":"e3c98bf3d6fb18291f75d2729cda285f7c3c7683c0e684d0c0bfcbcf20d721e1"} Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.355578 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e3c98bf3d6fb18291f75d2729cda285f7c3c7683c0e684d0c0bfcbcf20d721e1" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.355599 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-dvwp2" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.596792 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-ssh-key-openstack-edpm-ipam\") pod \"28b7f396-866b-4dc5-9ed5-d45a94da5890\" (UID: \"28b7f396-866b-4dc5-9ed5-d45a94da5890\") " Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.597790 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld"] Jan 30 12:37:24 crc kubenswrapper[4703]: E0130 12:37:24.598329 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28b7f396-866b-4dc5-9ed5-d45a94da5890" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.598355 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="28b7f396-866b-4dc5-9ed5-d45a94da5890" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.598662 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="28b7f396-866b-4dc5-9ed5-d45a94da5890" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.599645 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.603446 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.603446 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.610779 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "28b7f396-866b-4dc5-9ed5-d45a94da5890" (UID: "28b7f396-866b-4dc5-9ed5-d45a94da5890"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.623009 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld"] Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.699354 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcp4d\" (UniqueName: \"kubernetes.io/projected/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-kube-api-access-mcp4d\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.699438 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.699515 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.699582 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.699652 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.699735 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.699796 4703 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/28b7f396-866b-4dc5-9ed5-d45a94da5890-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.802706 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.802817 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.802946 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.802974 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcp4d\" (UniqueName: \"kubernetes.io/projected/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-kube-api-access-mcp4d\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.803007 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.803072 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld\" (UID: 
\"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.807010 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.807370 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.808048 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.808113 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.812010 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.831982 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcp4d\" (UniqueName: \"kubernetes.io/projected/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-kube-api-access-mcp4d\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:24 crc kubenswrapper[4703]: I0130 12:37:24.990757 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:37:25 crc kubenswrapper[4703]: I0130 12:37:25.088494 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739" Jan 30 12:37:25 crc kubenswrapper[4703]: E0130 12:37:25.089398 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:37:25 crc kubenswrapper[4703]: I0130 12:37:25.645522 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld"] Jan 30 12:37:25 crc kubenswrapper[4703]: I0130 12:37:25.656400 4703 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 30 12:37:26 crc kubenswrapper[4703]: I0130 12:37:26.379109 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" event={"ID":"bb1b3231-4e16-4378-8c21-0c59eaa8dd29","Type":"ContainerStarted","Data":"27be2e365d8e1b5e5b32540dcf5c4475d2a6437fdf7719dfc3c76d7863ace3a8"} Jan 30 12:37:27 crc kubenswrapper[4703]: I0130 12:37:27.392285 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" event={"ID":"bb1b3231-4e16-4378-8c21-0c59eaa8dd29","Type":"ContainerStarted","Data":"e9e4016130e4e8cf4cb8bd12ae3787df16d405a601dadc1c8a21d2f02eb4136b"} Jan 30 12:37:27 crc kubenswrapper[4703]: I0130 12:37:27.422867 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" podStartSLOduration=2.914559487 podStartE2EDuration="3.422842405s" podCreationTimestamp="2026-01-30 12:37:24 +0000 UTC" firstStartedPulling="2026-01-30 12:37:25.65609296 +0000 UTC m=+2481.433914614" lastFinishedPulling="2026-01-30 12:37:26.164375878 +0000 UTC m=+2481.942197532" observedRunningTime="2026-01-30 12:37:27.421341185 +0000 UTC m=+2483.199162839" watchObservedRunningTime="2026-01-30 12:37:27.422842405 +0000 UTC m=+2483.200664059" Jan 30 12:37:33 crc kubenswrapper[4703]: I0130 12:37:33.086701 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7" Jan 30 12:37:33 crc kubenswrapper[4703]: E0130 12:37:33.087868 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:37:37 crc kubenswrapper[4703]: I0130 12:37:37.087233 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739" Jan 30 12:37:37 crc kubenswrapper[4703]: E0130 12:37:37.089536 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler 
pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:37:46 crc kubenswrapper[4703]: I0130 12:37:46.087341 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7" Jan 30 12:37:46 crc kubenswrapper[4703]: E0130 12:37:46.090337 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:37:49 crc kubenswrapper[4703]: I0130 12:37:49.087870 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739" Jan 30 12:37:49 crc kubenswrapper[4703]: E0130 12:37:49.088828 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:38:00 crc kubenswrapper[4703]: I0130 12:38:00.086934 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7" Jan 30 12:38:00 crc kubenswrapper[4703]: E0130 12:38:00.088442 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:38:04 crc kubenswrapper[4703]: I0130 12:38:04.087576 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739" Jan 30 12:38:04 crc kubenswrapper[4703]: E0130 12:38:04.088807 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:38:14 crc kubenswrapper[4703]: I0130 12:38:14.093519 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7" Jan 30 12:38:14 crc kubenswrapper[4703]: E0130 12:38:14.095276 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:38:16 crc kubenswrapper[4703]: I0130 12:38:16.087002 4703 scope.go:117] "RemoveContainer" 
containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739" Jan 30 12:38:16 crc kubenswrapper[4703]: E0130 12:38:16.087859 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:38:18 crc kubenswrapper[4703]: I0130 12:38:18.066751 4703 generic.go:334] "Generic (PLEG): container finished" podID="bb1b3231-4e16-4378-8c21-0c59eaa8dd29" containerID="e9e4016130e4e8cf4cb8bd12ae3787df16d405a601dadc1c8a21d2f02eb4136b" exitCode=0 Jan 30 12:38:18 crc kubenswrapper[4703]: I0130 12:38:18.066810 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" event={"ID":"bb1b3231-4e16-4378-8c21-0c59eaa8dd29","Type":"ContainerDied","Data":"e9e4016130e4e8cf4cb8bd12ae3787df16d405a601dadc1c8a21d2f02eb4136b"} Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.597584 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.702407 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcp4d\" (UniqueName: \"kubernetes.io/projected/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-kube-api-access-mcp4d\") pod \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.702564 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-nova-metadata-neutron-config-0\") pod \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.702620 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-ssh-key-openstack-edpm-ipam\") pod \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.702641 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-inventory\") pod \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.702691 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-neutron-ovn-metadata-agent-neutron-config-0\") pod \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\" (UID: \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.702729 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-neutron-metadata-combined-ca-bundle\") pod \"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\" (UID: 
\"bb1b3231-4e16-4378-8c21-0c59eaa8dd29\") " Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.709844 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-kube-api-access-mcp4d" (OuterVolumeSpecName: "kube-api-access-mcp4d") pod "bb1b3231-4e16-4378-8c21-0c59eaa8dd29" (UID: "bb1b3231-4e16-4378-8c21-0c59eaa8dd29"). InnerVolumeSpecName "kube-api-access-mcp4d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.711071 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "bb1b3231-4e16-4378-8c21-0c59eaa8dd29" (UID: "bb1b3231-4e16-4378-8c21-0c59eaa8dd29"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.741037 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "bb1b3231-4e16-4378-8c21-0c59eaa8dd29" (UID: "bb1b3231-4e16-4378-8c21-0c59eaa8dd29"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.741789 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "bb1b3231-4e16-4378-8c21-0c59eaa8dd29" (UID: "bb1b3231-4e16-4378-8c21-0c59eaa8dd29"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.743511 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "bb1b3231-4e16-4378-8c21-0c59eaa8dd29" (UID: "bb1b3231-4e16-4378-8c21-0c59eaa8dd29"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.746851 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-inventory" (OuterVolumeSpecName: "inventory") pod "bb1b3231-4e16-4378-8c21-0c59eaa8dd29" (UID: "bb1b3231-4e16-4378-8c21-0c59eaa8dd29"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.806559 4703 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.806953 4703 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.807073 4703 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-inventory\") on node \"crc\" DevicePath \"\"" Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.807211 4703 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.807306 4703 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:38:19 crc kubenswrapper[4703]: I0130 12:38:19.807399 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcp4d\" (UniqueName: \"kubernetes.io/projected/bb1b3231-4e16-4378-8c21-0c59eaa8dd29-kube-api-access-mcp4d\") on node \"crc\" DevicePath \"\"" Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.092715 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld" event={"ID":"bb1b3231-4e16-4378-8c21-0c59eaa8dd29","Type":"ContainerDied","Data":"27be2e365d8e1b5e5b32540dcf5c4475d2a6437fdf7719dfc3c76d7863ace3a8"} Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.092779 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27be2e365d8e1b5e5b32540dcf5c4475d2a6437fdf7719dfc3c76d7863ace3a8" Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.092854 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.203976 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"]
Jan 30 12:38:20 crc kubenswrapper[4703]: E0130 12:38:20.204583 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb1b3231-4e16-4378-8c21-0c59eaa8dd29" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.204610 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb1b3231-4e16-4378-8c21-0c59eaa8dd29" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.204856 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb1b3231-4e16-4378-8c21-0c59eaa8dd29" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.205797 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.210760 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.211028 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-jjdl5"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.211559 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.211713 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.216238 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.216470 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6m2h\" (UniqueName: \"kubernetes.io/projected/201fa630-41e7-4070-9460-1f1b10397de8-kube-api-access-f6m2h\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tctmb\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.216648 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tctmb\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.217051 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tctmb\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.217235 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tctmb\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.217389 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tctmb\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.223272 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"]
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.318719 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tctmb\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.318805 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tctmb\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.318851 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tctmb\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.318902 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6m2h\" (UniqueName: \"kubernetes.io/projected/201fa630-41e7-4070-9460-1f1b10397de8-kube-api-access-f6m2h\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tctmb\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.318960 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tctmb\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.324232 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tctmb\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.325355 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tctmb\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.325587 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tctmb\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.329654 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tctmb\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.341099 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6m2h\" (UniqueName: \"kubernetes.io/projected/201fa630-41e7-4070-9460-1f1b10397de8-kube-api-access-f6m2h\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-tctmb\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:38:20 crc kubenswrapper[4703]: I0130 12:38:20.529405 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:38:21 crc kubenswrapper[4703]: I0130 12:38:21.124022 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"]
Jan 30 12:38:21 crc kubenswrapper[4703]: W0130 12:38:21.133538 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod201fa630_41e7_4070_9460_1f1b10397de8.slice/crio-e73c86e3af1a7855e22851af1fdffa8f78a26cae192c388dec31e2ca5d113772 WatchSource:0}: Error finding container e73c86e3af1a7855e22851af1fdffa8f78a26cae192c388dec31e2ca5d113772: Status 404 returned error can't find the container with id e73c86e3af1a7855e22851af1fdffa8f78a26cae192c388dec31e2ca5d113772
Jan 30 12:38:22 crc kubenswrapper[4703]: I0130 12:38:22.116942 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb" event={"ID":"201fa630-41e7-4070-9460-1f1b10397de8","Type":"ContainerStarted","Data":"967aca2e462db3e449dcc829ed941435e90b9609388cedaa836611264086cfdb"}
Jan 30 12:38:22 crc kubenswrapper[4703]: I0130 12:38:22.118043 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb" event={"ID":"201fa630-41e7-4070-9460-1f1b10397de8","Type":"ContainerStarted","Data":"e73c86e3af1a7855e22851af1fdffa8f78a26cae192c388dec31e2ca5d113772"}
Jan 30 12:38:22 crc kubenswrapper[4703]: I0130 12:38:22.137460 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb" podStartSLOduration=1.67951495 podStartE2EDuration="2.137432266s" podCreationTimestamp="2026-01-30 12:38:20 +0000 UTC" firstStartedPulling="2026-01-30 12:38:21.136355966 +0000 UTC m=+2536.914177620" lastFinishedPulling="2026-01-30 12:38:21.594273282 +0000 UTC m=+2537.372094936" observedRunningTime="2026-01-30 12:38:22.135497544 +0000 UTC m=+2537.913319188" watchObservedRunningTime="2026-01-30 12:38:22.137432266 +0000 UTC m=+2537.915253920"
Jan 30 12:38:28 crc kubenswrapper[4703]: I0130 12:38:28.087293 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"
Jan 30 12:38:28 crc kubenswrapper[4703]: E0130 12:38:28.088466 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:38:29 crc kubenswrapper[4703]: I0130 12:38:29.087178 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739"
Jan 30 12:38:29 crc kubenswrapper[4703]: E0130 12:38:29.087626 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:38:41 crc kubenswrapper[4703]: I0130 12:38:41.086030 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"
Jan 30 12:38:41 crc kubenswrapper[4703]: E0130 12:38:41.086937 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:38:42 crc kubenswrapper[4703]: I0130 12:38:42.087382 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739"
Jan 30 12:38:42 crc kubenswrapper[4703]: E0130 12:38:42.088188 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:38:53 crc kubenswrapper[4703]: I0130 12:38:53.086640 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739"
Jan 30 12:38:53 crc kubenswrapper[4703]: E0130 12:38:53.087810 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:38:56 crc kubenswrapper[4703]: I0130 12:38:56.086871 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"
Jan 30 12:38:56 crc kubenswrapper[4703]: E0130 12:38:56.087374 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:39:04 crc kubenswrapper[4703]: I0130 12:39:04.086857 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739"
Jan 30 12:39:04 crc kubenswrapper[4703]: E0130 12:39:04.090495 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:39:08 crc kubenswrapper[4703]: I0130 12:39:08.087992 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"
Jan 30 12:39:08 crc kubenswrapper[4703]: E0130 12:39:08.088917 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:39:17 crc kubenswrapper[4703]: I0130 12:39:17.087577 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739"
Jan 30 12:39:17 crc kubenswrapper[4703]: E0130 12:39:17.090760 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:39:21 crc kubenswrapper[4703]: I0130 12:39:21.087310 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"
Jan 30 12:39:21 crc kubenswrapper[4703]: E0130 12:39:21.088105 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:39:31 crc kubenswrapper[4703]: I0130 12:39:31.087403 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739"
Jan 30 12:39:31 crc kubenswrapper[4703]: E0130 12:39:31.088666 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:39:32 crc kubenswrapper[4703]: I0130 12:39:32.087667 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"
Jan 30 12:39:32 crc kubenswrapper[4703]: E0130 12:39:32.088207 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:39:43 crc kubenswrapper[4703]: I0130 12:39:43.087245 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739"
Jan 30 12:39:43 crc kubenswrapper[4703]: E0130 12:39:43.088650 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:39:46 crc kubenswrapper[4703]: I0130 12:39:46.087006 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"
Jan 30 12:39:46 crc kubenswrapper[4703]: E0130 12:39:46.087883 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:39:57 crc kubenswrapper[4703]: I0130 12:39:57.087288 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739"
Jan 30 12:39:57 crc kubenswrapper[4703]: E0130 12:39:57.088501 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:40:01 crc kubenswrapper[4703]: I0130 12:40:01.087530 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"
Jan 30 12:40:01 crc kubenswrapper[4703]: E0130 12:40:01.088692 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:40:11 crc kubenswrapper[4703]: I0130 12:40:11.086732 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739"
Jan 30 12:40:11 crc kubenswrapper[4703]: E0130 12:40:11.088182 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:40:14 crc kubenswrapper[4703]: I0130 12:40:14.087775 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"
Jan 30 12:40:14 crc kubenswrapper[4703]: E0130 12:40:14.089111 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:40:24 crc kubenswrapper[4703]: I0130 12:40:24.086701 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739"
Jan 30 12:40:24 crc kubenswrapper[4703]: E0130 12:40:24.087933 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:40:28 crc kubenswrapper[4703]: I0130 12:40:28.193106 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"
Jan 30 12:40:28 crc kubenswrapper[4703]: E0130 12:40:28.194180 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:40:37 crc kubenswrapper[4703]: I0130 12:40:37.086675 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739"
Jan 30 12:40:37 crc kubenswrapper[4703]: E0130 12:40:37.088106 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:40:42 crc kubenswrapper[4703]: I0130 12:40:42.087821 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"
Jan 30 12:40:42 crc kubenswrapper[4703]: E0130 12:40:42.089057 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:40:50 crc kubenswrapper[4703]: I0130 12:40:50.087236 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739"
Jan 30 12:40:50 crc kubenswrapper[4703]: E0130 12:40:50.087909 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:40:54 crc kubenswrapper[4703]: I0130 12:40:54.086547 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"
Jan 30 12:40:54 crc kubenswrapper[4703]: E0130 12:40:54.087724 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:40:58 crc kubenswrapper[4703]: I0130 12:40:58.706598 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9n7jl"]
Jan 30 12:40:58 crc kubenswrapper[4703]: I0130 12:40:58.709993 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9n7jl"
Jan 30 12:40:58 crc kubenswrapper[4703]: I0130 12:40:58.721823 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9n7jl"]
Jan 30 12:40:58 crc kubenswrapper[4703]: I0130 12:40:58.839018 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a-utilities\") pod \"redhat-operators-9n7jl\" (UID: \"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a\") " pod="openshift-marketplace/redhat-operators-9n7jl"
Jan 30 12:40:58 crc kubenswrapper[4703]: I0130 12:40:58.839152 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nn6cm\" (UniqueName: \"kubernetes.io/projected/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a-kube-api-access-nn6cm\") pod \"redhat-operators-9n7jl\" (UID: \"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a\") " pod="openshift-marketplace/redhat-operators-9n7jl"
Jan 30 12:40:58 crc kubenswrapper[4703]: I0130 12:40:58.839177 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a-catalog-content\") pod \"redhat-operators-9n7jl\" (UID: \"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a\") " pod="openshift-marketplace/redhat-operators-9n7jl"
Jan 30 12:40:58 crc kubenswrapper[4703]: I0130 12:40:58.941511 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a-utilities\") pod \"redhat-operators-9n7jl\" (UID: \"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a\") " pod="openshift-marketplace/redhat-operators-9n7jl"
Jan 30 12:40:58 crc kubenswrapper[4703]: I0130 12:40:58.941614 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nn6cm\" (UniqueName: \"kubernetes.io/projected/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a-kube-api-access-nn6cm\") pod \"redhat-operators-9n7jl\" (UID: \"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a\") " pod="openshift-marketplace/redhat-operators-9n7jl"
Jan 30 12:40:58 crc kubenswrapper[4703]: I0130 12:40:58.941646 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a-catalog-content\") pod \"redhat-operators-9n7jl\" (UID: \"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a\") " pod="openshift-marketplace/redhat-operators-9n7jl"
Jan 30 12:40:58 crc kubenswrapper[4703]: I0130 12:40:58.942264 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a-catalog-content\") pod \"redhat-operators-9n7jl\" (UID: \"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a\") " pod="openshift-marketplace/redhat-operators-9n7jl"
Jan 30 12:40:58 crc kubenswrapper[4703]: I0130 12:40:58.942274 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a-utilities\") pod \"redhat-operators-9n7jl\" (UID: \"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a\") " pod="openshift-marketplace/redhat-operators-9n7jl"
Jan 30 12:40:58 crc kubenswrapper[4703]: I0130 12:40:58.971602 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nn6cm\" (UniqueName: \"kubernetes.io/projected/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a-kube-api-access-nn6cm\") pod \"redhat-operators-9n7jl\" (UID: \"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a\") " pod="openshift-marketplace/redhat-operators-9n7jl"
Jan 30 12:40:59 crc kubenswrapper[4703]: I0130 12:40:59.063745 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9n7jl"
Jan 30 12:40:59 crc kubenswrapper[4703]: I0130 12:40:59.666935 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9n7jl"]
Jan 30 12:41:00 crc kubenswrapper[4703]: I0130 12:41:00.477761 4703 generic.go:334] "Generic (PLEG): container finished" podID="d94d4f4d-417d-4755-96da-3f2b5f3ddb2a" containerID="9a22ebd91af158cf1e1b9629cf6c4b82f82e0dd8918b6ef1eb9fc7884fa7d976" exitCode=0
Jan 30 12:41:00 crc kubenswrapper[4703]: I0130 12:41:00.477825 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9n7jl" event={"ID":"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a","Type":"ContainerDied","Data":"9a22ebd91af158cf1e1b9629cf6c4b82f82e0dd8918b6ef1eb9fc7884fa7d976"}
Jan 30 12:41:00 crc kubenswrapper[4703]: I0130 12:41:00.477865 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9n7jl" event={"ID":"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a","Type":"ContainerStarted","Data":"4315613ff802e593ac695826d96b307e4bcbaf3b64625af225ee748244872ad9"}
Jan 30 12:41:01 crc kubenswrapper[4703]: I0130 12:41:01.493973 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9n7jl" event={"ID":"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a","Type":"ContainerStarted","Data":"18ecc262112849c59489e2550bf4285dd192f6778d2ecad80d049f169c0c3ba7"}
Jan 30 12:41:02 crc kubenswrapper[4703]: I0130 12:41:02.463183 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739"
Jan 30 12:41:03 crc kubenswrapper[4703]: I0130 12:41:03.537037 4703 generic.go:334] "Generic (PLEG): container finished" podID="d94d4f4d-417d-4755-96da-3f2b5f3ddb2a" containerID="18ecc262112849c59489e2550bf4285dd192f6778d2ecad80d049f169c0c3ba7" exitCode=0
Jan 30 12:41:03 crc kubenswrapper[4703]: I0130 12:41:03.537187 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9n7jl" event={"ID":"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a","Type":"ContainerDied","Data":"18ecc262112849c59489e2550bf4285dd192f6778d2ecad80d049f169c0c3ba7"}
Jan 30 12:41:03 crc kubenswrapper[4703]: I0130 12:41:03.545797 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerStarted","Data":"92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29"}
Jan 30 12:41:04 crc kubenswrapper[4703]: I0130 12:41:04.562154 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9n7jl" event={"ID":"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a","Type":"ContainerStarted","Data":"23bbd355a455352365fccbd799d36b9d7f1cc28dc92e197b3909c3eab4dd77c6"}
Jan 30 12:41:05 crc kubenswrapper[4703]: I0130 12:41:05.087001 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"
Jan 30 12:41:05 crc kubenswrapper[4703]: E0130 12:41:05.087603 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:41:06 crc kubenswrapper[4703]: I0130 12:41:06.035310 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Jan 30 12:41:06 crc kubenswrapper[4703]: I0130 12:41:06.035797 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Jan 30 12:41:06 crc kubenswrapper[4703]: I0130 12:41:06.071824 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Jan 30 12:41:06 crc kubenswrapper[4703]: I0130 12:41:06.107674 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9n7jl" podStartSLOduration=4.526410105 podStartE2EDuration="8.107634642s" podCreationTimestamp="2026-01-30 12:40:58 +0000 UTC" firstStartedPulling="2026-01-30 12:41:00.479890921 +0000 UTC m=+2696.257712575" lastFinishedPulling="2026-01-30 12:41:04.061115458 +0000 UTC m=+2699.838937112" observedRunningTime="2026-01-30 12:41:04.591504402 +0000 UTC m=+2700.369326056" watchObservedRunningTime="2026-01-30 12:41:06.107634642 +0000 UTC m=+2701.885456296"
Jan 30 12:41:06 crc kubenswrapper[4703]: E0130 12:41:06.583736 4703 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29 is running failed: container process not found" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Jan 30 12:41:06 crc kubenswrapper[4703]: E0130 12:41:06.584522 4703 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29 is running failed: container process not found" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Jan 30 12:41:06 crc kubenswrapper[4703]: E0130 12:41:06.584887 4703 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29 is running failed: container process not found" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Jan 30 12:41:06 crc kubenswrapper[4703]: E0130 12:41:06.584983 4703 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" containerName="nova-scheduler-scheduler"
Jan 30 12:41:07 crc kubenswrapper[4703]: I0130 12:41:07.596531 4703 generic.go:334] "Generic (PLEG): container finished" podID="2fc19a6b-3cde-4bb5-9499-f5be846289da" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" exitCode=1
Jan 30 12:41:07 crc kubenswrapper[4703]: I0130 12:41:07.597073 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerDied","Data":"92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29"}
Jan 30 12:41:07 crc kubenswrapper[4703]: I0130 12:41:07.597156 4703 scope.go:117] "RemoveContainer" containerID="82041d87580a2501d6415ce92f4c7189941a1e07bd6fd1ff7a7b1939d6f84739"
Jan 30 12:41:07 crc kubenswrapper[4703]: I0130 12:41:07.598293 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29"
Jan 30 12:41:07 crc kubenswrapper[4703]: E0130 12:41:07.598639 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:41:09 crc kubenswrapper[4703]: I0130 12:41:09.064838 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9n7jl"
Jan 30 12:41:09 crc kubenswrapper[4703]: I0130 12:41:09.065560 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9n7jl"
Jan 30 12:41:10 crc kubenswrapper[4703]: I0130 12:41:10.117101 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-9n7jl" podUID="d94d4f4d-417d-4755-96da-3f2b5f3ddb2a" containerName="registry-server" probeResult="failure" output=<
Jan 30 12:41:10 crc kubenswrapper[4703]: timeout: failed to connect service ":50051" within 1s
Jan 30 12:41:10 crc kubenswrapper[4703]: >
Jan 30 12:41:16 crc kubenswrapper[4703]: I0130 12:41:16.035412 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/nova-scheduler-0"
Jan 30 12:41:16 crc kubenswrapper[4703]: I0130 12:41:16.036465 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Jan 30 12:41:16 crc kubenswrapper[4703]: I0130 12:41:16.037549 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29"
Jan 30 12:41:16 crc kubenswrapper[4703]: E0130 12:41:16.037886 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:41:18 crc kubenswrapper[4703]: I0130 12:41:18.087255 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"
Jan 30 12:41:18 crc kubenswrapper[4703]: E0130 12:41:18.088058 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:41:19 crc kubenswrapper[4703]: I0130 12:41:19.135837 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9n7jl"
Jan 30 12:41:19 crc kubenswrapper[4703]: I0130 12:41:19.192719 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9n7jl"
Jan 30 12:41:19 crc kubenswrapper[4703]: I0130 12:41:19.377791 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9n7jl"]
Jan 30 12:41:20 crc kubenswrapper[4703]: I0130 12:41:20.747670 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-9n7jl" podUID="d94d4f4d-417d-4755-96da-3f2b5f3ddb2a" containerName="registry-server" containerID="cri-o://23bbd355a455352365fccbd799d36b9d7f1cc28dc92e197b3909c3eab4dd77c6" gracePeriod=2
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.249420 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9n7jl"
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.320267 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a-catalog-content\") pod \"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a\" (UID: \"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a\") "
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.320543 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a-utilities\") pod \"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a\" (UID: \"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a\") "
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.320845 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nn6cm\" (UniqueName: \"kubernetes.io/projected/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a-kube-api-access-nn6cm\") pod \"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a\" (UID: \"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a\") "
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.321537 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a-utilities" (OuterVolumeSpecName: "utilities") pod "d94d4f4d-417d-4755-96da-3f2b5f3ddb2a" (UID: "d94d4f4d-417d-4755-96da-3f2b5f3ddb2a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.322191 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a-utilities\") on node \"crc\" DevicePath \"\""
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.332613 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a-kube-api-access-nn6cm" (OuterVolumeSpecName: "kube-api-access-nn6cm") pod "d94d4f4d-417d-4755-96da-3f2b5f3ddb2a" (UID: "d94d4f4d-417d-4755-96da-3f2b5f3ddb2a"). InnerVolumeSpecName "kube-api-access-nn6cm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.423784 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nn6cm\" (UniqueName: \"kubernetes.io/projected/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a-kube-api-access-nn6cm\") on node \"crc\" DevicePath \"\""
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.454311 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d94d4f4d-417d-4755-96da-3f2b5f3ddb2a" (UID: "d94d4f4d-417d-4755-96da-3f2b5f3ddb2a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.526289 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.762546 4703 generic.go:334] "Generic (PLEG): container finished" podID="d94d4f4d-417d-4755-96da-3f2b5f3ddb2a" containerID="23bbd355a455352365fccbd799d36b9d7f1cc28dc92e197b3909c3eab4dd77c6" exitCode=0
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.762638 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-9n7jl"
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.762636 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9n7jl" event={"ID":"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a","Type":"ContainerDied","Data":"23bbd355a455352365fccbd799d36b9d7f1cc28dc92e197b3909c3eab4dd77c6"}
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.763180 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9n7jl" event={"ID":"d94d4f4d-417d-4755-96da-3f2b5f3ddb2a","Type":"ContainerDied","Data":"4315613ff802e593ac695826d96b307e4bcbaf3b64625af225ee748244872ad9"}
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.763204 4703 scope.go:117] "RemoveContainer" containerID="23bbd355a455352365fccbd799d36b9d7f1cc28dc92e197b3909c3eab4dd77c6"
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.788334 4703 scope.go:117] "RemoveContainer" containerID="18ecc262112849c59489e2550bf4285dd192f6778d2ecad80d049f169c0c3ba7"
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.811948 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-9n7jl"]
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.822030 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-9n7jl"]
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.827340 4703 scope.go:117] "RemoveContainer" containerID="9a22ebd91af158cf1e1b9629cf6c4b82f82e0dd8918b6ef1eb9fc7884fa7d976"
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.876784 4703 scope.go:117] "RemoveContainer" containerID="23bbd355a455352365fccbd799d36b9d7f1cc28dc92e197b3909c3eab4dd77c6"
Jan 30 12:41:21 crc kubenswrapper[4703]: E0130 12:41:21.877529 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"23bbd355a455352365fccbd799d36b9d7f1cc28dc92e197b3909c3eab4dd77c6\": container with ID starting with 23bbd355a455352365fccbd799d36b9d7f1cc28dc92e197b3909c3eab4dd77c6 not found: ID does not exist" containerID="23bbd355a455352365fccbd799d36b9d7f1cc28dc92e197b3909c3eab4dd77c6"
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.877605 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"23bbd355a455352365fccbd799d36b9d7f1cc28dc92e197b3909c3eab4dd77c6"} err="failed to get container status \"23bbd355a455352365fccbd799d36b9d7f1cc28dc92e197b3909c3eab4dd77c6\": rpc error: code = NotFound desc = could not find container \"23bbd355a455352365fccbd799d36b9d7f1cc28dc92e197b3909c3eab4dd77c6\": container with ID starting with 23bbd355a455352365fccbd799d36b9d7f1cc28dc92e197b3909c3eab4dd77c6 not found: ID does not exist"
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.877635 4703 scope.go:117] "RemoveContainer" containerID="18ecc262112849c59489e2550bf4285dd192f6778d2ecad80d049f169c0c3ba7"
Jan 30 12:41:21 crc kubenswrapper[4703]: E0130 12:41:21.878418 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18ecc262112849c59489e2550bf4285dd192f6778d2ecad80d049f169c0c3ba7\": container with ID starting with 18ecc262112849c59489e2550bf4285dd192f6778d2ecad80d049f169c0c3ba7 not found: ID does not exist" containerID="18ecc262112849c59489e2550bf4285dd192f6778d2ecad80d049f169c0c3ba7"
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.878465 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18ecc262112849c59489e2550bf4285dd192f6778d2ecad80d049f169c0c3ba7"} err="failed to get container status \"18ecc262112849c59489e2550bf4285dd192f6778d2ecad80d049f169c0c3ba7\": rpc error: code = NotFound desc = could not find container \"18ecc262112849c59489e2550bf4285dd192f6778d2ecad80d049f169c0c3ba7\": container with ID starting with 18ecc262112849c59489e2550bf4285dd192f6778d2ecad80d049f169c0c3ba7 not found: ID does not exist"
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.878500 4703 scope.go:117] "RemoveContainer" containerID="9a22ebd91af158cf1e1b9629cf6c4b82f82e0dd8918b6ef1eb9fc7884fa7d976"
Jan 30 12:41:21 crc kubenswrapper[4703]: E0130 12:41:21.879026 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a22ebd91af158cf1e1b9629cf6c4b82f82e0dd8918b6ef1eb9fc7884fa7d976\": container with ID starting with 9a22ebd91af158cf1e1b9629cf6c4b82f82e0dd8918b6ef1eb9fc7884fa7d976 not found: ID does not exist" containerID="9a22ebd91af158cf1e1b9629cf6c4b82f82e0dd8918b6ef1eb9fc7884fa7d976"
Jan 30 12:41:21 crc kubenswrapper[4703]: I0130 12:41:21.879095 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a22ebd91af158cf1e1b9629cf6c4b82f82e0dd8918b6ef1eb9fc7884fa7d976"} err="failed to get container status \"9a22ebd91af158cf1e1b9629cf6c4b82f82e0dd8918b6ef1eb9fc7884fa7d976\": rpc error: code = NotFound desc = could not find container \"9a22ebd91af158cf1e1b9629cf6c4b82f82e0dd8918b6ef1eb9fc7884fa7d976\": container with ID starting with 9a22ebd91af158cf1e1b9629cf6c4b82f82e0dd8918b6ef1eb9fc7884fa7d976 not found: ID does not exist"
Jan 30 12:41:23 crc kubenswrapper[4703]: I0130 12:41:23.101990 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d94d4f4d-417d-4755-96da-3f2b5f3ddb2a" path="/var/lib/kubelet/pods/d94d4f4d-417d-4755-96da-3f2b5f3ddb2a/volumes"
Jan 30 12:41:29 crc kubenswrapper[4703]: I0130 12:41:29.087418 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"
Jan 30 12:41:29 crc kubenswrapper[4703]: E0130 12:41:29.089567 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:41:31 crc kubenswrapper[4703]: I0130 12:41:31.087209 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29"
Jan 30 12:41:31 crc kubenswrapper[4703]: E0130 12:41:31.088069 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:41:40 crc kubenswrapper[4703]: I0130 12:41:40.087392 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"
Jan 30 12:41:40 crc kubenswrapper[4703]: E0130 12:41:40.088598 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:41:42 crc kubenswrapper[4703]: I0130 12:41:42.087021 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29"
Jan 30 12:41:42 crc kubenswrapper[4703]: E0130 12:41:42.087878 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:41:54 crc kubenswrapper[4703]: I0130 12:41:54.087574 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7"
Jan 30 12:41:54 crc kubenswrapper[4703]: I0130 12:41:54.088579 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29"
Jan 30 12:41:54 crc kubenswrapper[4703]: E0130 12:41:54.088889 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:41:55 crc kubenswrapper[4703]: I0130 12:41:55.157548 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerStarted","Data":"f52cc783a86b634ff8736b30e24b266ea538324fc6801b094268d35989eedffc"}
Jan 30 12:42:05 crc kubenswrapper[4703]: I0130 12:42:05.094857 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29"
Jan 30 12:42:05 crc kubenswrapper[4703]: E0130 12:42:05.096007 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:42:10 crc kubenswrapper[4703]: I0130 12:42:10.323141 4703 generic.go:334] "Generic (PLEG): container finished" podID="201fa630-41e7-4070-9460-1f1b10397de8" containerID="967aca2e462db3e449dcc829ed941435e90b9609388cedaa836611264086cfdb" exitCode=0
Jan 30 12:42:10 crc kubenswrapper[4703]: I0130 12:42:10.323263 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb" event={"ID":"201fa630-41e7-4070-9460-1f1b10397de8","Type":"ContainerDied","Data":"967aca2e462db3e449dcc829ed941435e90b9609388cedaa836611264086cfdb"}
Jan 30 12:42:11 crc kubenswrapper[4703]: I0130 12:42:11.824499 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:42:11 crc kubenswrapper[4703]: I0130 12:42:11.903007 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-inventory\") pod \"201fa630-41e7-4070-9460-1f1b10397de8\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") "
Jan 30 12:42:11 crc kubenswrapper[4703]: I0130 12:42:11.903675 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-libvirt-secret-0\") pod \"201fa630-41e7-4070-9460-1f1b10397de8\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") "
Jan 30 12:42:11 crc kubenswrapper[4703]: I0130 12:42:11.904029 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-libvirt-combined-ca-bundle\") pod \"201fa630-41e7-4070-9460-1f1b10397de8\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") "
Jan 30 12:42:11 crc kubenswrapper[4703]: I0130 12:42:11.904193 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6m2h\" (UniqueName: \"kubernetes.io/projected/201fa630-41e7-4070-9460-1f1b10397de8-kube-api-access-f6m2h\") pod \"201fa630-41e7-4070-9460-1f1b10397de8\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") "
Jan 30 12:42:11 crc kubenswrapper[4703]: I0130 12:42:11.904365 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-ssh-key-openstack-edpm-ipam\") pod \"201fa630-41e7-4070-9460-1f1b10397de8\" (UID: \"201fa630-41e7-4070-9460-1f1b10397de8\") "
Jan 30 12:42:11 crc kubenswrapper[4703]: I0130 12:42:11.914353 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "201fa630-41e7-4070-9460-1f1b10397de8" (UID: "201fa630-41e7-4070-9460-1f1b10397de8"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:42:11 crc kubenswrapper[4703]: I0130 12:42:11.914613 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/201fa630-41e7-4070-9460-1f1b10397de8-kube-api-access-f6m2h" (OuterVolumeSpecName: "kube-api-access-f6m2h") pod "201fa630-41e7-4070-9460-1f1b10397de8" (UID: "201fa630-41e7-4070-9460-1f1b10397de8"). InnerVolumeSpecName "kube-api-access-f6m2h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 30 12:42:11 crc kubenswrapper[4703]: I0130 12:42:11.944582 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-inventory" (OuterVolumeSpecName: "inventory") pod "201fa630-41e7-4070-9460-1f1b10397de8" (UID: "201fa630-41e7-4070-9460-1f1b10397de8"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:42:11 crc kubenswrapper[4703]: I0130 12:42:11.946854 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "201fa630-41e7-4070-9460-1f1b10397de8" (UID: "201fa630-41e7-4070-9460-1f1b10397de8"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:42:11 crc kubenswrapper[4703]: I0130 12:42:11.956903 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "201fa630-41e7-4070-9460-1f1b10397de8" (UID: "201fa630-41e7-4070-9460-1f1b10397de8"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.007355 4703 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-libvirt-secret-0\") on node \"crc\" DevicePath \"\""
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.007397 4703 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.007411 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6m2h\" (UniqueName: \"kubernetes.io/projected/201fa630-41e7-4070-9460-1f1b10397de8-kube-api-access-f6m2h\") on node \"crc\" DevicePath \"\""
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.007419 4703 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.007429 4703 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/201fa630-41e7-4070-9460-1f1b10397de8-inventory\") on node \"crc\" DevicePath \"\""
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.373935 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb" event={"ID":"201fa630-41e7-4070-9460-1f1b10397de8","Type":"ContainerDied","Data":"e73c86e3af1a7855e22851af1fdffa8f78a26cae192c388dec31e2ca5d113772"}
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.373997 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e73c86e3af1a7855e22851af1fdffa8f78a26cae192c388dec31e2ca5d113772"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.374021 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-tctmb"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.485998 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv"]
Jan 30 12:42:12 crc kubenswrapper[4703]: E0130 12:42:12.486654 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d94d4f4d-417d-4755-96da-3f2b5f3ddb2a" containerName="extract-content"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.486689 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="d94d4f4d-417d-4755-96da-3f2b5f3ddb2a" containerName="extract-content"
Jan 30 12:42:12 crc kubenswrapper[4703]: E0130 12:42:12.486714 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d94d4f4d-417d-4755-96da-3f2b5f3ddb2a" containerName="registry-server"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.486723 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="d94d4f4d-417d-4755-96da-3f2b5f3ddb2a" containerName="registry-server"
Jan 30 12:42:12 crc kubenswrapper[4703]: E0130 12:42:12.486746 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="201fa630-41e7-4070-9460-1f1b10397de8" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.486757 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="201fa630-41e7-4070-9460-1f1b10397de8" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:42:12 crc kubenswrapper[4703]: E0130 12:42:12.486789 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d94d4f4d-417d-4755-96da-3f2b5f3ddb2a" containerName="extract-utilities"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.486798 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="d94d4f4d-417d-4755-96da-3f2b5f3ddb2a" containerName="extract-utilities"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.487030 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="201fa630-41e7-4070-9460-1f1b10397de8" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.487053 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="d94d4f4d-417d-4755-96da-3f2b5f3ddb2a" containerName="registry-server"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.487993 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.490930 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.491055 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.491281 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-jjdl5"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.491464 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.492582 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.493078 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.494090 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.499183 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv"]
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.627350 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.627480 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.627554 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.627672 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5pvd\" (UniqueName: \"kubernetes.io/projected/79c7c216-3c2c-4744-b6c5-d97feb476cdd-kube-api-access-l5pvd\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.627743 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.627780 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.627799 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.627828 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.627882 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.730711 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5pvd\" (UniqueName: \"kubernetes.io/projected/79c7c216-3c2c-4744-b6c5-d97feb476cdd-kube-api-access-l5pvd\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.730902 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.730949 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv"
Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.730986 4703 reconciler_common.go:218]
"operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.731061 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.731214 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.731286 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.731397 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.731501 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.732824 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.741005 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.741043 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" 
(UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.741259 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.742501 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.743233 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.743737 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.744202 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.758222 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5pvd\" (UniqueName: \"kubernetes.io/projected/79c7c216-3c2c-4744-b6c5-d97feb476cdd-kube-api-access-l5pvd\") pod \"nova-edpm-deployment-openstack-edpm-ipam-d9klv\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" Jan 30 12:42:12 crc kubenswrapper[4703]: I0130 12:42:12.814068 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" Jan 30 12:42:13 crc kubenswrapper[4703]: I0130 12:42:13.262461 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv"] Jan 30 12:42:13 crc kubenswrapper[4703]: I0130 12:42:13.386289 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" event={"ID":"79c7c216-3c2c-4744-b6c5-d97feb476cdd","Type":"ContainerStarted","Data":"b304e32677e488024d19ae7022478c2f5382c9f9d580f33115d94079d2f4aa63"} Jan 30 12:42:14 crc kubenswrapper[4703]: I0130 12:42:14.397061 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" event={"ID":"79c7c216-3c2c-4744-b6c5-d97feb476cdd","Type":"ContainerStarted","Data":"e1905c0db3c8b170f6837c6a3b1467b5927bed2807dcee978938ca1565ecf511"} Jan 30 12:42:17 crc kubenswrapper[4703]: I0130 12:42:17.086997 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:42:17 crc kubenswrapper[4703]: E0130 12:42:17.088562 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:42:32 crc kubenswrapper[4703]: I0130 12:42:32.087083 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:42:32 crc kubenswrapper[4703]: E0130 12:42:32.088452 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:42:38 crc kubenswrapper[4703]: I0130 12:42:38.736871 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" podStartSLOduration=26.149929945 podStartE2EDuration="26.736847475s" podCreationTimestamp="2026-01-30 12:42:12 +0000 UTC" firstStartedPulling="2026-01-30 12:42:13.284155223 +0000 UTC m=+2769.061976867" lastFinishedPulling="2026-01-30 12:42:13.871072743 +0000 UTC m=+2769.648894397" observedRunningTime="2026-01-30 12:42:14.419685169 +0000 UTC m=+2770.197506823" watchObservedRunningTime="2026-01-30 12:42:38.736847475 +0000 UTC m=+2794.514669139" Jan 30 12:42:38 crc kubenswrapper[4703]: I0130 12:42:38.743457 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-wfpl7"] Jan 30 12:42:38 crc kubenswrapper[4703]: I0130 12:42:38.746393 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wfpl7" Jan 30 12:42:38 crc kubenswrapper[4703]: I0130 12:42:38.762662 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wfpl7"] Jan 30 12:42:38 crc kubenswrapper[4703]: I0130 12:42:38.827086 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00bb680f-2c89-461d-9b79-b1dd9b3d8844-utilities\") pod \"community-operators-wfpl7\" (UID: \"00bb680f-2c89-461d-9b79-b1dd9b3d8844\") " pod="openshift-marketplace/community-operators-wfpl7" Jan 30 12:42:38 crc kubenswrapper[4703]: I0130 12:42:38.827224 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00bb680f-2c89-461d-9b79-b1dd9b3d8844-catalog-content\") pod \"community-operators-wfpl7\" (UID: \"00bb680f-2c89-461d-9b79-b1dd9b3d8844\") " pod="openshift-marketplace/community-operators-wfpl7" Jan 30 12:42:38 crc kubenswrapper[4703]: I0130 12:42:38.827573 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4sjvz\" (UniqueName: \"kubernetes.io/projected/00bb680f-2c89-461d-9b79-b1dd9b3d8844-kube-api-access-4sjvz\") pod \"community-operators-wfpl7\" (UID: \"00bb680f-2c89-461d-9b79-b1dd9b3d8844\") " pod="openshift-marketplace/community-operators-wfpl7" Jan 30 12:42:38 crc kubenswrapper[4703]: I0130 12:42:38.929816 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4sjvz\" (UniqueName: \"kubernetes.io/projected/00bb680f-2c89-461d-9b79-b1dd9b3d8844-kube-api-access-4sjvz\") pod \"community-operators-wfpl7\" (UID: \"00bb680f-2c89-461d-9b79-b1dd9b3d8844\") " pod="openshift-marketplace/community-operators-wfpl7" Jan 30 12:42:38 crc kubenswrapper[4703]: I0130 12:42:38.930007 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00bb680f-2c89-461d-9b79-b1dd9b3d8844-utilities\") pod \"community-operators-wfpl7\" (UID: \"00bb680f-2c89-461d-9b79-b1dd9b3d8844\") " pod="openshift-marketplace/community-operators-wfpl7" Jan 30 12:42:38 crc kubenswrapper[4703]: I0130 12:42:38.930040 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00bb680f-2c89-461d-9b79-b1dd9b3d8844-catalog-content\") pod \"community-operators-wfpl7\" (UID: \"00bb680f-2c89-461d-9b79-b1dd9b3d8844\") " pod="openshift-marketplace/community-operators-wfpl7" Jan 30 12:42:38 crc kubenswrapper[4703]: I0130 12:42:38.930889 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00bb680f-2c89-461d-9b79-b1dd9b3d8844-catalog-content\") pod \"community-operators-wfpl7\" (UID: \"00bb680f-2c89-461d-9b79-b1dd9b3d8844\") " pod="openshift-marketplace/community-operators-wfpl7" Jan 30 12:42:38 crc kubenswrapper[4703]: I0130 12:42:38.931165 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00bb680f-2c89-461d-9b79-b1dd9b3d8844-utilities\") pod \"community-operators-wfpl7\" (UID: \"00bb680f-2c89-461d-9b79-b1dd9b3d8844\") " pod="openshift-marketplace/community-operators-wfpl7" Jan 30 12:42:38 crc kubenswrapper[4703]: I0130 12:42:38.955904 4703 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-4sjvz\" (UniqueName: \"kubernetes.io/projected/00bb680f-2c89-461d-9b79-b1dd9b3d8844-kube-api-access-4sjvz\") pod \"community-operators-wfpl7\" (UID: \"00bb680f-2c89-461d-9b79-b1dd9b3d8844\") " pod="openshift-marketplace/community-operators-wfpl7" Jan 30 12:42:39 crc kubenswrapper[4703]: I0130 12:42:39.094924 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wfpl7" Jan 30 12:42:39 crc kubenswrapper[4703]: I0130 12:42:39.668775 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wfpl7"] Jan 30 12:42:40 crc kubenswrapper[4703]: I0130 12:42:40.703857 4703 generic.go:334] "Generic (PLEG): container finished" podID="00bb680f-2c89-461d-9b79-b1dd9b3d8844" containerID="1029cef341c04c8f4405faf3de87dd2e347930fa38e64b6fcc79b8e43d6cd54b" exitCode=0 Jan 30 12:42:40 crc kubenswrapper[4703]: I0130 12:42:40.704066 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wfpl7" event={"ID":"00bb680f-2c89-461d-9b79-b1dd9b3d8844","Type":"ContainerDied","Data":"1029cef341c04c8f4405faf3de87dd2e347930fa38e64b6fcc79b8e43d6cd54b"} Jan 30 12:42:40 crc kubenswrapper[4703]: I0130 12:42:40.704422 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wfpl7" event={"ID":"00bb680f-2c89-461d-9b79-b1dd9b3d8844","Type":"ContainerStarted","Data":"4989eac443af48828159ff5a8710947eee12c2d9580859a646c31f4a48952606"} Jan 30 12:42:40 crc kubenswrapper[4703]: I0130 12:42:40.707328 4703 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 30 12:42:41 crc kubenswrapper[4703]: I0130 12:42:41.717950 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wfpl7" event={"ID":"00bb680f-2c89-461d-9b79-b1dd9b3d8844","Type":"ContainerStarted","Data":"04982233b87a60fdadd1f4166553d828ef3544e417e9c518b4d83190b8915f66"} Jan 30 12:42:42 crc kubenswrapper[4703]: I0130 12:42:42.734581 4703 generic.go:334] "Generic (PLEG): container finished" podID="00bb680f-2c89-461d-9b79-b1dd9b3d8844" containerID="04982233b87a60fdadd1f4166553d828ef3544e417e9c518b4d83190b8915f66" exitCode=0 Jan 30 12:42:42 crc kubenswrapper[4703]: I0130 12:42:42.734715 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wfpl7" event={"ID":"00bb680f-2c89-461d-9b79-b1dd9b3d8844","Type":"ContainerDied","Data":"04982233b87a60fdadd1f4166553d828ef3544e417e9c518b4d83190b8915f66"} Jan 30 12:42:43 crc kubenswrapper[4703]: I0130 12:42:43.751359 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wfpl7" event={"ID":"00bb680f-2c89-461d-9b79-b1dd9b3d8844","Type":"ContainerStarted","Data":"edf4ea4224a741d32a46074ea82231d5a70baa314e8b8f3c916ddc0daf463f6b"} Jan 30 12:42:43 crc kubenswrapper[4703]: I0130 12:42:43.778942 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wfpl7" podStartSLOduration=3.289619276 podStartE2EDuration="5.778906729s" podCreationTimestamp="2026-01-30 12:42:38 +0000 UTC" firstStartedPulling="2026-01-30 12:42:40.70698539 +0000 UTC m=+2796.484807044" lastFinishedPulling="2026-01-30 12:42:43.196272853 +0000 UTC m=+2798.974094497" observedRunningTime="2026-01-30 12:42:43.772967232 +0000 UTC m=+2799.550788886" watchObservedRunningTime="2026-01-30 
12:42:43.778906729 +0000 UTC m=+2799.556728383" Jan 30 12:42:44 crc kubenswrapper[4703]: I0130 12:42:44.086196 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:42:44 crc kubenswrapper[4703]: E0130 12:42:44.086565 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:42:49 crc kubenswrapper[4703]: I0130 12:42:49.100898 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wfpl7" Jan 30 12:42:49 crc kubenswrapper[4703]: I0130 12:42:49.101797 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wfpl7" Jan 30 12:42:49 crc kubenswrapper[4703]: I0130 12:42:49.151863 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wfpl7" Jan 30 12:42:49 crc kubenswrapper[4703]: I0130 12:42:49.890937 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wfpl7" Jan 30 12:42:49 crc kubenswrapper[4703]: I0130 12:42:49.952761 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wfpl7"] Jan 30 12:42:51 crc kubenswrapper[4703]: I0130 12:42:51.874035 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wfpl7" podUID="00bb680f-2c89-461d-9b79-b1dd9b3d8844" containerName="registry-server" containerID="cri-o://edf4ea4224a741d32a46074ea82231d5a70baa314e8b8f3c916ddc0daf463f6b" gracePeriod=2 Jan 30 12:42:52 crc kubenswrapper[4703]: I0130 12:42:52.778376 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wfpl7" Jan 30 12:42:52 crc kubenswrapper[4703]: I0130 12:42:52.887382 4703 generic.go:334] "Generic (PLEG): container finished" podID="00bb680f-2c89-461d-9b79-b1dd9b3d8844" containerID="edf4ea4224a741d32a46074ea82231d5a70baa314e8b8f3c916ddc0daf463f6b" exitCode=0 Jan 30 12:42:52 crc kubenswrapper[4703]: I0130 12:42:52.887454 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wfpl7" event={"ID":"00bb680f-2c89-461d-9b79-b1dd9b3d8844","Type":"ContainerDied","Data":"edf4ea4224a741d32a46074ea82231d5a70baa314e8b8f3c916ddc0daf463f6b"} Jan 30 12:42:52 crc kubenswrapper[4703]: I0130 12:42:52.887536 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wfpl7" event={"ID":"00bb680f-2c89-461d-9b79-b1dd9b3d8844","Type":"ContainerDied","Data":"4989eac443af48828159ff5a8710947eee12c2d9580859a646c31f4a48952606"} Jan 30 12:42:52 crc kubenswrapper[4703]: I0130 12:42:52.887533 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wfpl7" Jan 30 12:42:52 crc kubenswrapper[4703]: I0130 12:42:52.887576 4703 scope.go:117] "RemoveContainer" containerID="edf4ea4224a741d32a46074ea82231d5a70baa314e8b8f3c916ddc0daf463f6b" Jan 30 12:42:52 crc kubenswrapper[4703]: I0130 12:42:52.916875 4703 scope.go:117] "RemoveContainer" containerID="04982233b87a60fdadd1f4166553d828ef3544e417e9c518b4d83190b8915f66" Jan 30 12:42:52 crc kubenswrapper[4703]: I0130 12:42:52.936609 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4sjvz\" (UniqueName: \"kubernetes.io/projected/00bb680f-2c89-461d-9b79-b1dd9b3d8844-kube-api-access-4sjvz\") pod \"00bb680f-2c89-461d-9b79-b1dd9b3d8844\" (UID: \"00bb680f-2c89-461d-9b79-b1dd9b3d8844\") " Jan 30 12:42:52 crc kubenswrapper[4703]: I0130 12:42:52.936685 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00bb680f-2c89-461d-9b79-b1dd9b3d8844-catalog-content\") pod \"00bb680f-2c89-461d-9b79-b1dd9b3d8844\" (UID: \"00bb680f-2c89-461d-9b79-b1dd9b3d8844\") " Jan 30 12:42:52 crc kubenswrapper[4703]: I0130 12:42:52.937040 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00bb680f-2c89-461d-9b79-b1dd9b3d8844-utilities\") pod \"00bb680f-2c89-461d-9b79-b1dd9b3d8844\" (UID: \"00bb680f-2c89-461d-9b79-b1dd9b3d8844\") " Jan 30 12:42:52 crc kubenswrapper[4703]: I0130 12:42:52.939406 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00bb680f-2c89-461d-9b79-b1dd9b3d8844-utilities" (OuterVolumeSpecName: "utilities") pod "00bb680f-2c89-461d-9b79-b1dd9b3d8844" (UID: "00bb680f-2c89-461d-9b79-b1dd9b3d8844"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:42:52 crc kubenswrapper[4703]: I0130 12:42:52.942504 4703 scope.go:117] "RemoveContainer" containerID="1029cef341c04c8f4405faf3de87dd2e347930fa38e64b6fcc79b8e43d6cd54b" Jan 30 12:42:52 crc kubenswrapper[4703]: I0130 12:42:52.946689 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00bb680f-2c89-461d-9b79-b1dd9b3d8844-kube-api-access-4sjvz" (OuterVolumeSpecName: "kube-api-access-4sjvz") pod "00bb680f-2c89-461d-9b79-b1dd9b3d8844" (UID: "00bb680f-2c89-461d-9b79-b1dd9b3d8844"). InnerVolumeSpecName "kube-api-access-4sjvz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:42:53 crc kubenswrapper[4703]: I0130 12:42:53.041578 4703 scope.go:117] "RemoveContainer" containerID="edf4ea4224a741d32a46074ea82231d5a70baa314e8b8f3c916ddc0daf463f6b" Jan 30 12:42:53 crc kubenswrapper[4703]: I0130 12:42:53.041695 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/00bb680f-2c89-461d-9b79-b1dd9b3d8844-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:42:53 crc kubenswrapper[4703]: I0130 12:42:53.041729 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4sjvz\" (UniqueName: \"kubernetes.io/projected/00bb680f-2c89-461d-9b79-b1dd9b3d8844-kube-api-access-4sjvz\") on node \"crc\" DevicePath \"\"" Jan 30 12:42:53 crc kubenswrapper[4703]: E0130 12:42:53.042614 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edf4ea4224a741d32a46074ea82231d5a70baa314e8b8f3c916ddc0daf463f6b\": container with ID starting with edf4ea4224a741d32a46074ea82231d5a70baa314e8b8f3c916ddc0daf463f6b not found: ID does not exist" containerID="edf4ea4224a741d32a46074ea82231d5a70baa314e8b8f3c916ddc0daf463f6b" Jan 30 12:42:53 crc kubenswrapper[4703]: I0130 12:42:53.042655 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edf4ea4224a741d32a46074ea82231d5a70baa314e8b8f3c916ddc0daf463f6b"} err="failed to get container status \"edf4ea4224a741d32a46074ea82231d5a70baa314e8b8f3c916ddc0daf463f6b\": rpc error: code = NotFound desc = could not find container \"edf4ea4224a741d32a46074ea82231d5a70baa314e8b8f3c916ddc0daf463f6b\": container with ID starting with edf4ea4224a741d32a46074ea82231d5a70baa314e8b8f3c916ddc0daf463f6b not found: ID does not exist" Jan 30 12:42:53 crc kubenswrapper[4703]: I0130 12:42:53.042684 4703 scope.go:117] "RemoveContainer" containerID="04982233b87a60fdadd1f4166553d828ef3544e417e9c518b4d83190b8915f66" Jan 30 12:42:53 crc kubenswrapper[4703]: E0130 12:42:53.043381 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04982233b87a60fdadd1f4166553d828ef3544e417e9c518b4d83190b8915f66\": container with ID starting with 04982233b87a60fdadd1f4166553d828ef3544e417e9c518b4d83190b8915f66 not found: ID does not exist" containerID="04982233b87a60fdadd1f4166553d828ef3544e417e9c518b4d83190b8915f66" Jan 30 12:42:53 crc kubenswrapper[4703]: I0130 12:42:53.043425 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04982233b87a60fdadd1f4166553d828ef3544e417e9c518b4d83190b8915f66"} err="failed to get container status \"04982233b87a60fdadd1f4166553d828ef3544e417e9c518b4d83190b8915f66\": rpc error: code = NotFound desc = could not find container \"04982233b87a60fdadd1f4166553d828ef3544e417e9c518b4d83190b8915f66\": container with ID starting with 04982233b87a60fdadd1f4166553d828ef3544e417e9c518b4d83190b8915f66 not found: ID does not exist" Jan 30 12:42:53 crc kubenswrapper[4703]: I0130 12:42:53.043459 4703 scope.go:117] "RemoveContainer" containerID="1029cef341c04c8f4405faf3de87dd2e347930fa38e64b6fcc79b8e43d6cd54b" Jan 30 12:42:53 crc kubenswrapper[4703]: E0130 12:42:53.044015 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1029cef341c04c8f4405faf3de87dd2e347930fa38e64b6fcc79b8e43d6cd54b\": container with ID starting with 
1029cef341c04c8f4405faf3de87dd2e347930fa38e64b6fcc79b8e43d6cd54b not found: ID does not exist" containerID="1029cef341c04c8f4405faf3de87dd2e347930fa38e64b6fcc79b8e43d6cd54b" Jan 30 12:42:53 crc kubenswrapper[4703]: I0130 12:42:53.044044 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1029cef341c04c8f4405faf3de87dd2e347930fa38e64b6fcc79b8e43d6cd54b"} err="failed to get container status \"1029cef341c04c8f4405faf3de87dd2e347930fa38e64b6fcc79b8e43d6cd54b\": rpc error: code = NotFound desc = could not find container \"1029cef341c04c8f4405faf3de87dd2e347930fa38e64b6fcc79b8e43d6cd54b\": container with ID starting with 1029cef341c04c8f4405faf3de87dd2e347930fa38e64b6fcc79b8e43d6cd54b not found: ID does not exist" Jan 30 12:42:53 crc kubenswrapper[4703]: I0130 12:42:53.373869 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/00bb680f-2c89-461d-9b79-b1dd9b3d8844-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "00bb680f-2c89-461d-9b79-b1dd9b3d8844" (UID: "00bb680f-2c89-461d-9b79-b1dd9b3d8844"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:42:53 crc kubenswrapper[4703]: I0130 12:42:53.454644 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/00bb680f-2c89-461d-9b79-b1dd9b3d8844-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:42:53 crc kubenswrapper[4703]: I0130 12:42:53.525294 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wfpl7"] Jan 30 12:42:53 crc kubenswrapper[4703]: I0130 12:42:53.538270 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wfpl7"] Jan 30 12:42:55 crc kubenswrapper[4703]: I0130 12:42:55.112297 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00bb680f-2c89-461d-9b79-b1dd9b3d8844" path="/var/lib/kubelet/pods/00bb680f-2c89-461d-9b79-b1dd9b3d8844/volumes" Jan 30 12:42:59 crc kubenswrapper[4703]: I0130 12:42:59.087945 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:42:59 crc kubenswrapper[4703]: E0130 12:42:59.089228 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:43:14 crc kubenswrapper[4703]: I0130 12:43:14.086871 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:43:14 crc kubenswrapper[4703]: E0130 12:43:14.088242 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:43:27 crc kubenswrapper[4703]: I0130 12:43:27.086957 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:43:27 crc kubenswrapper[4703]: E0130 12:43:27.088067 4703 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:43:40 crc kubenswrapper[4703]: I0130 12:43:40.087559 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:43:40 crc kubenswrapper[4703]: E0130 12:43:40.088883 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:43:42 crc kubenswrapper[4703]: I0130 12:43:42.592414 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hhw4q"] Jan 30 12:43:42 crc kubenswrapper[4703]: E0130 12:43:42.593501 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00bb680f-2c89-461d-9b79-b1dd9b3d8844" containerName="extract-content" Jan 30 12:43:42 crc kubenswrapper[4703]: I0130 12:43:42.593517 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="00bb680f-2c89-461d-9b79-b1dd9b3d8844" containerName="extract-content" Jan 30 12:43:42 crc kubenswrapper[4703]: E0130 12:43:42.593532 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00bb680f-2c89-461d-9b79-b1dd9b3d8844" containerName="extract-utilities" Jan 30 12:43:42 crc kubenswrapper[4703]: I0130 12:43:42.593538 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="00bb680f-2c89-461d-9b79-b1dd9b3d8844" containerName="extract-utilities" Jan 30 12:43:42 crc kubenswrapper[4703]: E0130 12:43:42.593560 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00bb680f-2c89-461d-9b79-b1dd9b3d8844" containerName="registry-server" Jan 30 12:43:42 crc kubenswrapper[4703]: I0130 12:43:42.593568 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="00bb680f-2c89-461d-9b79-b1dd9b3d8844" containerName="registry-server" Jan 30 12:43:42 crc kubenswrapper[4703]: I0130 12:43:42.593801 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="00bb680f-2c89-461d-9b79-b1dd9b3d8844" containerName="registry-server" Jan 30 12:43:42 crc kubenswrapper[4703]: I0130 12:43:42.595581 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hhw4q" Jan 30 12:43:42 crc kubenswrapper[4703]: I0130 12:43:42.621286 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hhw4q"] Jan 30 12:43:42 crc kubenswrapper[4703]: I0130 12:43:42.697733 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nq7gr\" (UniqueName: \"kubernetes.io/projected/e3de862d-193c-49d8-93f3-c385c1b91645-kube-api-access-nq7gr\") pod \"certified-operators-hhw4q\" (UID: \"e3de862d-193c-49d8-93f3-c385c1b91645\") " pod="openshift-marketplace/certified-operators-hhw4q" Jan 30 12:43:42 crc kubenswrapper[4703]: I0130 12:43:42.697838 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3de862d-193c-49d8-93f3-c385c1b91645-utilities\") pod \"certified-operators-hhw4q\" (UID: \"e3de862d-193c-49d8-93f3-c385c1b91645\") " pod="openshift-marketplace/certified-operators-hhw4q" Jan 30 12:43:42 crc kubenswrapper[4703]: I0130 12:43:42.697896 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3de862d-193c-49d8-93f3-c385c1b91645-catalog-content\") pod \"certified-operators-hhw4q\" (UID: \"e3de862d-193c-49d8-93f3-c385c1b91645\") " pod="openshift-marketplace/certified-operators-hhw4q" Jan 30 12:43:42 crc kubenswrapper[4703]: I0130 12:43:42.801044 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nq7gr\" (UniqueName: \"kubernetes.io/projected/e3de862d-193c-49d8-93f3-c385c1b91645-kube-api-access-nq7gr\") pod \"certified-operators-hhw4q\" (UID: \"e3de862d-193c-49d8-93f3-c385c1b91645\") " pod="openshift-marketplace/certified-operators-hhw4q" Jan 30 12:43:42 crc kubenswrapper[4703]: I0130 12:43:42.801201 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3de862d-193c-49d8-93f3-c385c1b91645-utilities\") pod \"certified-operators-hhw4q\" (UID: \"e3de862d-193c-49d8-93f3-c385c1b91645\") " pod="openshift-marketplace/certified-operators-hhw4q" Jan 30 12:43:42 crc kubenswrapper[4703]: I0130 12:43:42.801269 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3de862d-193c-49d8-93f3-c385c1b91645-catalog-content\") pod \"certified-operators-hhw4q\" (UID: \"e3de862d-193c-49d8-93f3-c385c1b91645\") " pod="openshift-marketplace/certified-operators-hhw4q" Jan 30 12:43:42 crc kubenswrapper[4703]: I0130 12:43:42.801707 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3de862d-193c-49d8-93f3-c385c1b91645-utilities\") pod \"certified-operators-hhw4q\" (UID: \"e3de862d-193c-49d8-93f3-c385c1b91645\") " pod="openshift-marketplace/certified-operators-hhw4q" Jan 30 12:43:42 crc kubenswrapper[4703]: I0130 12:43:42.801860 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3de862d-193c-49d8-93f3-c385c1b91645-catalog-content\") pod \"certified-operators-hhw4q\" (UID: \"e3de862d-193c-49d8-93f3-c385c1b91645\") " pod="openshift-marketplace/certified-operators-hhw4q" Jan 30 12:43:42 crc kubenswrapper[4703]: I0130 12:43:42.824218 4703 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-nq7gr\" (UniqueName: \"kubernetes.io/projected/e3de862d-193c-49d8-93f3-c385c1b91645-kube-api-access-nq7gr\") pod \"certified-operators-hhw4q\" (UID: \"e3de862d-193c-49d8-93f3-c385c1b91645\") " pod="openshift-marketplace/certified-operators-hhw4q" Jan 30 12:43:42 crc kubenswrapper[4703]: I0130 12:43:42.919920 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hhw4q" Jan 30 12:43:43 crc kubenswrapper[4703]: I0130 12:43:43.561371 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hhw4q"] Jan 30 12:43:44 crc kubenswrapper[4703]: I0130 12:43:44.495418 4703 generic.go:334] "Generic (PLEG): container finished" podID="e3de862d-193c-49d8-93f3-c385c1b91645" containerID="9117ce356598a7684dc554b8fb0c938489a96b9eed7bb3294cc438a02fff36a1" exitCode=0 Jan 30 12:43:44 crc kubenswrapper[4703]: I0130 12:43:44.495542 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhw4q" event={"ID":"e3de862d-193c-49d8-93f3-c385c1b91645","Type":"ContainerDied","Data":"9117ce356598a7684dc554b8fb0c938489a96b9eed7bb3294cc438a02fff36a1"} Jan 30 12:43:44 crc kubenswrapper[4703]: I0130 12:43:44.495977 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhw4q" event={"ID":"e3de862d-193c-49d8-93f3-c385c1b91645","Type":"ContainerStarted","Data":"34919cde78b7c4272a172a6ab3db2504361d2fd1e26e87bf92c14bba8f557cd2"} Jan 30 12:43:45 crc kubenswrapper[4703]: I0130 12:43:45.509039 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhw4q" event={"ID":"e3de862d-193c-49d8-93f3-c385c1b91645","Type":"ContainerStarted","Data":"6674d8c674e9bfa03c645adb5f27aa3964dcef91ead1c6b390f7d0c2d149609f"} Jan 30 12:43:46 crc kubenswrapper[4703]: I0130 12:43:46.520948 4703 generic.go:334] "Generic (PLEG): container finished" podID="e3de862d-193c-49d8-93f3-c385c1b91645" containerID="6674d8c674e9bfa03c645adb5f27aa3964dcef91ead1c6b390f7d0c2d149609f" exitCode=0 Jan 30 12:43:46 crc kubenswrapper[4703]: I0130 12:43:46.521053 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhw4q" event={"ID":"e3de862d-193c-49d8-93f3-c385c1b91645","Type":"ContainerDied","Data":"6674d8c674e9bfa03c645adb5f27aa3964dcef91ead1c6b390f7d0c2d149609f"} Jan 30 12:43:47 crc kubenswrapper[4703]: I0130 12:43:47.535723 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhw4q" event={"ID":"e3de862d-193c-49d8-93f3-c385c1b91645","Type":"ContainerStarted","Data":"b693546e5ac74b9192a4a45af4d7e3b93323e64bbf90a373b880f40aabb572d5"} Jan 30 12:43:47 crc kubenswrapper[4703]: I0130 12:43:47.563547 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hhw4q" podStartSLOduration=3.172128217 podStartE2EDuration="5.56352273s" podCreationTimestamp="2026-01-30 12:43:42 +0000 UTC" firstStartedPulling="2026-01-30 12:43:44.500271674 +0000 UTC m=+2860.278093328" lastFinishedPulling="2026-01-30 12:43:46.891666187 +0000 UTC m=+2862.669487841" observedRunningTime="2026-01-30 12:43:47.555504058 +0000 UTC m=+2863.333325712" watchObservedRunningTime="2026-01-30 12:43:47.56352273 +0000 UTC m=+2863.341344384" Jan 30 12:43:52 crc kubenswrapper[4703]: I0130 12:43:52.920291 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-hhw4q" Jan 30 12:43:52 crc kubenswrapper[4703]: I0130 12:43:52.921002 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hhw4q" Jan 30 12:43:52 crc kubenswrapper[4703]: I0130 12:43:52.972619 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hhw4q" Jan 30 12:43:53 crc kubenswrapper[4703]: I0130 12:43:53.660287 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hhw4q" Jan 30 12:43:53 crc kubenswrapper[4703]: I0130 12:43:53.724907 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hhw4q"] Jan 30 12:43:55 crc kubenswrapper[4703]: I0130 12:43:55.093867 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:43:55 crc kubenswrapper[4703]: E0130 12:43:55.094266 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:43:55 crc kubenswrapper[4703]: I0130 12:43:55.622109 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hhw4q" podUID="e3de862d-193c-49d8-93f3-c385c1b91645" containerName="registry-server" containerID="cri-o://b693546e5ac74b9192a4a45af4d7e3b93323e64bbf90a373b880f40aabb572d5" gracePeriod=2 Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.185236 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hhw4q" Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.317924 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3de862d-193c-49d8-93f3-c385c1b91645-utilities\") pod \"e3de862d-193c-49d8-93f3-c385c1b91645\" (UID: \"e3de862d-193c-49d8-93f3-c385c1b91645\") " Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.318627 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3de862d-193c-49d8-93f3-c385c1b91645-catalog-content\") pod \"e3de862d-193c-49d8-93f3-c385c1b91645\" (UID: \"e3de862d-193c-49d8-93f3-c385c1b91645\") " Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.318843 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nq7gr\" (UniqueName: \"kubernetes.io/projected/e3de862d-193c-49d8-93f3-c385c1b91645-kube-api-access-nq7gr\") pod \"e3de862d-193c-49d8-93f3-c385c1b91645\" (UID: \"e3de862d-193c-49d8-93f3-c385c1b91645\") " Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.319087 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3de862d-193c-49d8-93f3-c385c1b91645-utilities" (OuterVolumeSpecName: "utilities") pod "e3de862d-193c-49d8-93f3-c385c1b91645" (UID: "e3de862d-193c-49d8-93f3-c385c1b91645"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.320023 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3de862d-193c-49d8-93f3-c385c1b91645-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.335441 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3de862d-193c-49d8-93f3-c385c1b91645-kube-api-access-nq7gr" (OuterVolumeSpecName: "kube-api-access-nq7gr") pod "e3de862d-193c-49d8-93f3-c385c1b91645" (UID: "e3de862d-193c-49d8-93f3-c385c1b91645"). InnerVolumeSpecName "kube-api-access-nq7gr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.388842 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3de862d-193c-49d8-93f3-c385c1b91645-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e3de862d-193c-49d8-93f3-c385c1b91645" (UID: "e3de862d-193c-49d8-93f3-c385c1b91645"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.422843 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nq7gr\" (UniqueName: \"kubernetes.io/projected/e3de862d-193c-49d8-93f3-c385c1b91645-kube-api-access-nq7gr\") on node \"crc\" DevicePath \"\"" Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.422891 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3de862d-193c-49d8-93f3-c385c1b91645-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.635260 4703 generic.go:334] "Generic (PLEG): container finished" podID="e3de862d-193c-49d8-93f3-c385c1b91645" containerID="b693546e5ac74b9192a4a45af4d7e3b93323e64bbf90a373b880f40aabb572d5" exitCode=0 Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.635350 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhw4q" event={"ID":"e3de862d-193c-49d8-93f3-c385c1b91645","Type":"ContainerDied","Data":"b693546e5ac74b9192a4a45af4d7e3b93323e64bbf90a373b880f40aabb572d5"} Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.635395 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hhw4q" event={"ID":"e3de862d-193c-49d8-93f3-c385c1b91645","Type":"ContainerDied","Data":"34919cde78b7c4272a172a6ab3db2504361d2fd1e26e87bf92c14bba8f557cd2"} Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.635417 4703 scope.go:117] "RemoveContainer" containerID="b693546e5ac74b9192a4a45af4d7e3b93323e64bbf90a373b880f40aabb572d5" Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.635613 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hhw4q" Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.676614 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hhw4q"] Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.683651 4703 scope.go:117] "RemoveContainer" containerID="6674d8c674e9bfa03c645adb5f27aa3964dcef91ead1c6b390f7d0c2d149609f" Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.686972 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hhw4q"] Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.740395 4703 scope.go:117] "RemoveContainer" containerID="9117ce356598a7684dc554b8fb0c938489a96b9eed7bb3294cc438a02fff36a1" Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.817685 4703 scope.go:117] "RemoveContainer" containerID="b693546e5ac74b9192a4a45af4d7e3b93323e64bbf90a373b880f40aabb572d5" Jan 30 12:43:56 crc kubenswrapper[4703]: E0130 12:43:56.819069 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b693546e5ac74b9192a4a45af4d7e3b93323e64bbf90a373b880f40aabb572d5\": container with ID starting with b693546e5ac74b9192a4a45af4d7e3b93323e64bbf90a373b880f40aabb572d5 not found: ID does not exist" containerID="b693546e5ac74b9192a4a45af4d7e3b93323e64bbf90a373b880f40aabb572d5" Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.819138 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b693546e5ac74b9192a4a45af4d7e3b93323e64bbf90a373b880f40aabb572d5"} err="failed to get container status \"b693546e5ac74b9192a4a45af4d7e3b93323e64bbf90a373b880f40aabb572d5\": rpc error: code = NotFound desc = could not find container \"b693546e5ac74b9192a4a45af4d7e3b93323e64bbf90a373b880f40aabb572d5\": container with ID starting with b693546e5ac74b9192a4a45af4d7e3b93323e64bbf90a373b880f40aabb572d5 not found: ID does not exist" Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.819180 4703 scope.go:117] "RemoveContainer" containerID="6674d8c674e9bfa03c645adb5f27aa3964dcef91ead1c6b390f7d0c2d149609f" Jan 30 12:43:56 crc kubenswrapper[4703]: E0130 12:43:56.819749 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6674d8c674e9bfa03c645adb5f27aa3964dcef91ead1c6b390f7d0c2d149609f\": container with ID starting with 6674d8c674e9bfa03c645adb5f27aa3964dcef91ead1c6b390f7d0c2d149609f not found: ID does not exist" containerID="6674d8c674e9bfa03c645adb5f27aa3964dcef91ead1c6b390f7d0c2d149609f" Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.819783 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6674d8c674e9bfa03c645adb5f27aa3964dcef91ead1c6b390f7d0c2d149609f"} err="failed to get container status \"6674d8c674e9bfa03c645adb5f27aa3964dcef91ead1c6b390f7d0c2d149609f\": rpc error: code = NotFound desc = could not find container \"6674d8c674e9bfa03c645adb5f27aa3964dcef91ead1c6b390f7d0c2d149609f\": container with ID starting with 6674d8c674e9bfa03c645adb5f27aa3964dcef91ead1c6b390f7d0c2d149609f not found: ID does not exist" Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.819802 4703 scope.go:117] "RemoveContainer" containerID="9117ce356598a7684dc554b8fb0c938489a96b9eed7bb3294cc438a02fff36a1" Jan 30 12:43:56 crc kubenswrapper[4703]: E0130 12:43:56.820198 4703 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"9117ce356598a7684dc554b8fb0c938489a96b9eed7bb3294cc438a02fff36a1\": container with ID starting with 9117ce356598a7684dc554b8fb0c938489a96b9eed7bb3294cc438a02fff36a1 not found: ID does not exist" containerID="9117ce356598a7684dc554b8fb0c938489a96b9eed7bb3294cc438a02fff36a1" Jan 30 12:43:56 crc kubenswrapper[4703]: I0130 12:43:56.820242 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9117ce356598a7684dc554b8fb0c938489a96b9eed7bb3294cc438a02fff36a1"} err="failed to get container status \"9117ce356598a7684dc554b8fb0c938489a96b9eed7bb3294cc438a02fff36a1\": rpc error: code = NotFound desc = could not find container \"9117ce356598a7684dc554b8fb0c938489a96b9eed7bb3294cc438a02fff36a1\": container with ID starting with 9117ce356598a7684dc554b8fb0c938489a96b9eed7bb3294cc438a02fff36a1 not found: ID does not exist" Jan 30 12:43:57 crc kubenswrapper[4703]: I0130 12:43:57.104329 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3de862d-193c-49d8-93f3-c385c1b91645" path="/var/lib/kubelet/pods/e3de862d-193c-49d8-93f3-c385c1b91645/volumes" Jan 30 12:44:10 crc kubenswrapper[4703]: I0130 12:44:10.128817 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:44:10 crc kubenswrapper[4703]: E0130 12:44:10.130218 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:44:12 crc kubenswrapper[4703]: I0130 12:44:12.823547 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:44:12 crc kubenswrapper[4703]: I0130 12:44:12.824204 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:44:25 crc kubenswrapper[4703]: I0130 12:44:25.094623 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:44:25 crc kubenswrapper[4703]: E0130 12:44:25.097573 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:44:32 crc kubenswrapper[4703]: I0130 12:44:32.038705 4703 generic.go:334] "Generic (PLEG): container finished" podID="79c7c216-3c2c-4744-b6c5-d97feb476cdd" containerID="e1905c0db3c8b170f6837c6a3b1467b5927bed2807dcee978938ca1565ecf511" exitCode=0 Jan 30 12:44:32 crc kubenswrapper[4703]: I0130 12:44:32.038801 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" event={"ID":"79c7c216-3c2c-4744-b6c5-d97feb476cdd","Type":"ContainerDied","Data":"e1905c0db3c8b170f6837c6a3b1467b5927bed2807dcee978938ca1565ecf511"} Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.544320 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv" Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.719154 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-migration-ssh-key-1\") pod \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.719367 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-combined-ca-bundle\") pod \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.719424 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-cell1-compute-config-0\") pod \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.719515 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-cell1-compute-config-1\") pod \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.719556 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l5pvd\" (UniqueName: \"kubernetes.io/projected/79c7c216-3c2c-4744-b6c5-d97feb476cdd-kube-api-access-l5pvd\") pod \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.719589 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-extra-config-0\") pod \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.719688 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-migration-ssh-key-0\") pod \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.719719 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-ssh-key-openstack-edpm-ipam\") pod \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.719777 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" 
(UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-inventory\") pod \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\" (UID: \"79c7c216-3c2c-4744-b6c5-d97feb476cdd\") " Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.728492 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "79c7c216-3c2c-4744-b6c5-d97feb476cdd" (UID: "79c7c216-3c2c-4744-b6c5-d97feb476cdd"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.735397 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79c7c216-3c2c-4744-b6c5-d97feb476cdd-kube-api-access-l5pvd" (OuterVolumeSpecName: "kube-api-access-l5pvd") pod "79c7c216-3c2c-4744-b6c5-d97feb476cdd" (UID: "79c7c216-3c2c-4744-b6c5-d97feb476cdd"). InnerVolumeSpecName "kube-api-access-l5pvd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.763504 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-inventory" (OuterVolumeSpecName: "inventory") pod "79c7c216-3c2c-4744-b6c5-d97feb476cdd" (UID: "79c7c216-3c2c-4744-b6c5-d97feb476cdd"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.763537 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "79c7c216-3c2c-4744-b6c5-d97feb476cdd" (UID: "79c7c216-3c2c-4744-b6c5-d97feb476cdd"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.764320 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "79c7c216-3c2c-4744-b6c5-d97feb476cdd" (UID: "79c7c216-3c2c-4744-b6c5-d97feb476cdd"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.773981 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "79c7c216-3c2c-4744-b6c5-d97feb476cdd" (UID: "79c7c216-3c2c-4744-b6c5-d97feb476cdd"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.774071 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "79c7c216-3c2c-4744-b6c5-d97feb476cdd" (UID: "79c7c216-3c2c-4744-b6c5-d97feb476cdd"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.774109 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "79c7c216-3c2c-4744-b6c5-d97feb476cdd" (UID: "79c7c216-3c2c-4744-b6c5-d97feb476cdd"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.781878 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "79c7c216-3c2c-4744-b6c5-d97feb476cdd" (UID: "79c7c216-3c2c-4744-b6c5-d97feb476cdd"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.822924 4703 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.822981 4703 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-inventory\") on node \"crc\" DevicePath \"\"" Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.822992 4703 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.823004 4703 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.823013 4703 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.823021 4703 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.823031 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l5pvd\" (UniqueName: \"kubernetes.io/projected/79c7c216-3c2c-4744-b6c5-d97feb476cdd-kube-api-access-l5pvd\") on node \"crc\" DevicePath \"\"" Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.823043 4703 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:44:33 crc kubenswrapper[4703]: I0130 12:44:33.823052 4703 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/79c7c216-3c2c-4744-b6c5-d97feb476cdd-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:44:34 crc 
Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.059881 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b304e32677e488024d19ae7022478c2f5382c9f9d580f33115d94079d2f4aa63"
Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.059497 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-d9klv"
Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.188983 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql"]
Jan 30 12:44:34 crc kubenswrapper[4703]: E0130 12:44:34.189554 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3de862d-193c-49d8-93f3-c385c1b91645" containerName="extract-content"
Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.189579 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3de862d-193c-49d8-93f3-c385c1b91645" containerName="extract-content"
Jan 30 12:44:34 crc kubenswrapper[4703]: E0130 12:44:34.189606 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79c7c216-3c2c-4744-b6c5-d97feb476cdd" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.189616 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="79c7c216-3c2c-4744-b6c5-d97feb476cdd" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:44:34 crc kubenswrapper[4703]: E0130 12:44:34.189676 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3de862d-193c-49d8-93f3-c385c1b91645" containerName="registry-server"
Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.189872 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3de862d-193c-49d8-93f3-c385c1b91645" containerName="registry-server"
Jan 30 12:44:34 crc kubenswrapper[4703]: E0130 12:44:34.189887 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3de862d-193c-49d8-93f3-c385c1b91645" containerName="extract-utilities"
Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.189893 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3de862d-193c-49d8-93f3-c385c1b91645" containerName="extract-utilities"
Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.190166 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="79c7c216-3c2c-4744-b6c5-d97feb476cdd" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.190190 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3de862d-193c-49d8-93f3-c385c1b91645" containerName="registry-server"
Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.191432 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql"
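[Annotation] Before admitting the new telemetry pod, the CPU and memory managers discard per-container resource assignments belonging to pods that no longer exist; that is what the RemoveStaleState / "Deleted CPUSet assignment" pairs above record. Roughly, assuming a simple map-backed state store (illustrative, not the kubelet's actual structures):

package cpustate

import "fmt"

// removeStaleState drops assignments whose pod is no longer active, as the
// cpu_manager does before handling a newly added pod.
func removeStaleState(assignments map[string]map[string]string, activePods map[string]bool) {
	for podUID, containers := range assignments {
		if activePods[podUID] {
			continue
		}
		for name := range containers {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", podUID, name)
			delete(containers, name)
		}
		delete(assignments, podUID)
	}
}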
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.194801 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.194863 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.195037 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-jjdl5" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.195167 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.195292 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.202062 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql"] Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.334899 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjmv7\" (UniqueName: \"kubernetes.io/projected/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-kube-api-access-rjmv7\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.334980 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.335072 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.335278 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.335320 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc 
kubenswrapper[4703]: I0130 12:44:34.335402 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.335440 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.438572 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.438648 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.438958 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.438988 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.439244 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjmv7\" (UniqueName: \"kubernetes.io/projected/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-kube-api-access-rjmv7\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.439297 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" 
(UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.439356 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.445287 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.446797 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.447035 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.447266 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.447289 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.455798 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.460782 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjmv7\" (UniqueName: \"kubernetes.io/projected/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-kube-api-access-rjmv7\") pod 
\"telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:34 crc kubenswrapper[4703]: I0130 12:44:34.525184 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:44:35 crc kubenswrapper[4703]: I0130 12:44:35.132962 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql"] Jan 30 12:44:36 crc kubenswrapper[4703]: I0130 12:44:36.083559 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" event={"ID":"0563aaa6-b1b3-411e-8ff0-4ef8986fa932","Type":"ContainerStarted","Data":"563e2f69f0173baefdbed58a23bb70adb04ceb46021ee484db9866a4f8d2dbfd"} Jan 30 12:44:36 crc kubenswrapper[4703]: I0130 12:44:36.084502 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" event={"ID":"0563aaa6-b1b3-411e-8ff0-4ef8986fa932","Type":"ContainerStarted","Data":"c900c05d55fa9fb902444f918da79ea305e3f61394e18805fa4c75da7d3b92bc"} Jan 30 12:44:37 crc kubenswrapper[4703]: I0130 12:44:37.262866 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" podStartSLOduration=2.851615095 podStartE2EDuration="3.262838594s" podCreationTimestamp="2026-01-30 12:44:34 +0000 UTC" firstStartedPulling="2026-01-30 12:44:35.145972814 +0000 UTC m=+2910.923794468" lastFinishedPulling="2026-01-30 12:44:35.557196313 +0000 UTC m=+2911.335017967" observedRunningTime="2026-01-30 12:44:36.114991998 +0000 UTC m=+2911.892813652" watchObservedRunningTime="2026-01-30 12:44:37.262838594 +0000 UTC m=+2913.040660248" Jan 30 12:44:37 crc kubenswrapper[4703]: I0130 12:44:37.271801 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dwq4s"] Jan 30 12:44:37 crc kubenswrapper[4703]: I0130 12:44:37.275903 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dwq4s" Jan 30 12:44:37 crc kubenswrapper[4703]: I0130 12:44:37.290382 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwq4s"] Jan 30 12:44:37 crc kubenswrapper[4703]: I0130 12:44:37.405693 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9r6ck\" (UniqueName: \"kubernetes.io/projected/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7-kube-api-access-9r6ck\") pod \"redhat-marketplace-dwq4s\" (UID: \"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7\") " pod="openshift-marketplace/redhat-marketplace-dwq4s" Jan 30 12:44:37 crc kubenswrapper[4703]: I0130 12:44:37.405753 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7-catalog-content\") pod \"redhat-marketplace-dwq4s\" (UID: \"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7\") " pod="openshift-marketplace/redhat-marketplace-dwq4s" Jan 30 12:44:37 crc kubenswrapper[4703]: I0130 12:44:37.405868 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7-utilities\") pod \"redhat-marketplace-dwq4s\" (UID: \"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7\") " pod="openshift-marketplace/redhat-marketplace-dwq4s" Jan 30 12:44:37 crc kubenswrapper[4703]: I0130 12:44:37.508449 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9r6ck\" (UniqueName: \"kubernetes.io/projected/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7-kube-api-access-9r6ck\") pod \"redhat-marketplace-dwq4s\" (UID: \"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7\") " pod="openshift-marketplace/redhat-marketplace-dwq4s" Jan 30 12:44:37 crc kubenswrapper[4703]: I0130 12:44:37.508500 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7-catalog-content\") pod \"redhat-marketplace-dwq4s\" (UID: \"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7\") " pod="openshift-marketplace/redhat-marketplace-dwq4s" Jan 30 12:44:37 crc kubenswrapper[4703]: I0130 12:44:37.508544 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7-utilities\") pod \"redhat-marketplace-dwq4s\" (UID: \"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7\") " pod="openshift-marketplace/redhat-marketplace-dwq4s" Jan 30 12:44:37 crc kubenswrapper[4703]: I0130 12:44:37.509154 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7-catalog-content\") pod \"redhat-marketplace-dwq4s\" (UID: \"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7\") " pod="openshift-marketplace/redhat-marketplace-dwq4s" Jan 30 12:44:37 crc kubenswrapper[4703]: I0130 12:44:37.509321 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7-utilities\") pod \"redhat-marketplace-dwq4s\" (UID: \"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7\") " pod="openshift-marketplace/redhat-marketplace-dwq4s" Jan 30 12:44:37 crc kubenswrapper[4703]: I0130 12:44:37.531723 4703 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-9r6ck\" (UniqueName: \"kubernetes.io/projected/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7-kube-api-access-9r6ck\") pod \"redhat-marketplace-dwq4s\" (UID: \"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7\") " pod="openshift-marketplace/redhat-marketplace-dwq4s" Jan 30 12:44:37 crc kubenswrapper[4703]: I0130 12:44:37.602659 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dwq4s" Jan 30 12:44:38 crc kubenswrapper[4703]: I0130 12:44:38.087160 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:44:38 crc kubenswrapper[4703]: E0130 12:44:38.087986 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:44:38 crc kubenswrapper[4703]: I0130 12:44:38.158322 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwq4s"] Jan 30 12:44:39 crc kubenswrapper[4703]: I0130 12:44:39.192516 4703 generic.go:334] "Generic (PLEG): container finished" podID="2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7" containerID="3c1f2dc8722a6069f94420daee2468a71610e06c129ec6b914383d09341723f2" exitCode=0 Jan 30 12:44:39 crc kubenswrapper[4703]: I0130 12:44:39.192842 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwq4s" event={"ID":"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7","Type":"ContainerDied","Data":"3c1f2dc8722a6069f94420daee2468a71610e06c129ec6b914383d09341723f2"} Jan 30 12:44:39 crc kubenswrapper[4703]: I0130 12:44:39.192880 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwq4s" event={"ID":"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7","Type":"ContainerStarted","Data":"1bf1a2f9d598733c75ec89bb5fe7a4581eb8223845f255a7caaeaa203dd07bd9"} Jan 30 12:44:41 crc kubenswrapper[4703]: I0130 12:44:41.216994 4703 generic.go:334] "Generic (PLEG): container finished" podID="2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7" containerID="b8d92087370ded67ba4280bfd2aa5187644882ee4769708ab79b8d45834ca8e9" exitCode=0 Jan 30 12:44:41 crc kubenswrapper[4703]: I0130 12:44:41.217135 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwq4s" event={"ID":"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7","Type":"ContainerDied","Data":"b8d92087370ded67ba4280bfd2aa5187644882ee4769708ab79b8d45834ca8e9"} Jan 30 12:44:42 crc kubenswrapper[4703]: I0130 12:44:42.232006 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwq4s" event={"ID":"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7","Type":"ContainerStarted","Data":"55fe0540558cc50239db857398c26ae00ee86ab6cb9810e435bf3cd5bbca62bd"} Jan 30 12:44:42 crc kubenswrapper[4703]: I0130 12:44:42.257329 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dwq4s" podStartSLOduration=2.853716955 podStartE2EDuration="5.257300739s" podCreationTimestamp="2026-01-30 12:44:37 +0000 UTC" firstStartedPulling="2026-01-30 12:44:39.196252802 +0000 UTC m=+2914.974074456" lastFinishedPulling="2026-01-30 12:44:41.599836576 +0000 UTC m=+2917.377658240" observedRunningTime="2026-01-30 
12:44:42.251332592 +0000 UTC m=+2918.029154256" watchObservedRunningTime="2026-01-30 12:44:42.257300739 +0000 UTC m=+2918.035122393" Jan 30 12:44:42 crc kubenswrapper[4703]: I0130 12:44:42.823021 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:44:42 crc kubenswrapper[4703]: I0130 12:44:42.823151 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:44:47 crc kubenswrapper[4703]: I0130 12:44:47.603922 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dwq4s" Jan 30 12:44:47 crc kubenswrapper[4703]: I0130 12:44:47.604962 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dwq4s" Jan 30 12:44:47 crc kubenswrapper[4703]: I0130 12:44:47.661511 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dwq4s" Jan 30 12:44:48 crc kubenswrapper[4703]: I0130 12:44:48.346259 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dwq4s" Jan 30 12:44:52 crc kubenswrapper[4703]: I0130 12:44:52.087386 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:44:52 crc kubenswrapper[4703]: E0130 12:44:52.088303 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:44:53 crc kubenswrapper[4703]: I0130 12:44:53.461859 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwq4s"] Jan 30 12:44:53 crc kubenswrapper[4703]: I0130 12:44:53.463513 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dwq4s" podUID="2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7" containerName="registry-server" containerID="cri-o://55fe0540558cc50239db857398c26ae00ee86ab6cb9810e435bf3cd5bbca62bd" gracePeriod=2 Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.050323 4703 util.go:48] "No ready sandbox for pod can be found. 
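[Annotation] The machine-config-daemon liveness failures that recur through this section (12:44:12, 12:44:42, 12:45:12) are plain HTTP probes: the kubelet GETs http://127.0.0.1:8798/health and counts a dial error, or a status outside the 200-399 range, as a failure. A stand-alone approximation of one probe attempt (URL and timeout here are illustrative):

package probe

import (
	"fmt"
	"net/http"
	"time"
)

// probeOnce performs one HTTP liveness check. A refused connection, as in
// the log above, surfaces as a *url.Error from client.Get.
func probeOnce(url string) error {
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "dial tcp 127.0.0.1:8798: connect: connection refused"
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return nil
}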
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dwq4s" Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.216526 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7-utilities\") pod \"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7\" (UID: \"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7\") " Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.216699 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7-catalog-content\") pod \"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7\" (UID: \"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7\") " Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.216896 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9r6ck\" (UniqueName: \"kubernetes.io/projected/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7-kube-api-access-9r6ck\") pod \"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7\" (UID: \"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7\") " Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.218199 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7-utilities" (OuterVolumeSpecName: "utilities") pod "2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7" (UID: "2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.233214 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7-kube-api-access-9r6ck" (OuterVolumeSpecName: "kube-api-access-9r6ck") pod "2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7" (UID: "2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7"). InnerVolumeSpecName "kube-api-access-9r6ck". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.245253 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7" (UID: "2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.320422 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9r6ck\" (UniqueName: \"kubernetes.io/projected/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7-kube-api-access-9r6ck\") on node \"crc\" DevicePath \"\"" Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.320497 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.320511 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.363968 4703 generic.go:334] "Generic (PLEG): container finished" podID="2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7" containerID="55fe0540558cc50239db857398c26ae00ee86ab6cb9810e435bf3cd5bbca62bd" exitCode=0 Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.364025 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwq4s" event={"ID":"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7","Type":"ContainerDied","Data":"55fe0540558cc50239db857398c26ae00ee86ab6cb9810e435bf3cd5bbca62bd"} Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.364066 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwq4s" event={"ID":"2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7","Type":"ContainerDied","Data":"1bf1a2f9d598733c75ec89bb5fe7a4581eb8223845f255a7caaeaa203dd07bd9"} Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.364087 4703 scope.go:117] "RemoveContainer" containerID="55fe0540558cc50239db857398c26ae00ee86ab6cb9810e435bf3cd5bbca62bd" Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.364094 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dwq4s" Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.409034 4703 scope.go:117] "RemoveContainer" containerID="b8d92087370ded67ba4280bfd2aa5187644882ee4769708ab79b8d45834ca8e9" Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.411555 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwq4s"] Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.423155 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwq4s"] Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.437948 4703 scope.go:117] "RemoveContainer" containerID="3c1f2dc8722a6069f94420daee2468a71610e06c129ec6b914383d09341723f2" Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.484313 4703 scope.go:117] "RemoveContainer" containerID="55fe0540558cc50239db857398c26ae00ee86ab6cb9810e435bf3cd5bbca62bd" Jan 30 12:44:54 crc kubenswrapper[4703]: E0130 12:44:54.484997 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55fe0540558cc50239db857398c26ae00ee86ab6cb9810e435bf3cd5bbca62bd\": container with ID starting with 55fe0540558cc50239db857398c26ae00ee86ab6cb9810e435bf3cd5bbca62bd not found: ID does not exist" containerID="55fe0540558cc50239db857398c26ae00ee86ab6cb9810e435bf3cd5bbca62bd" Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.485053 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55fe0540558cc50239db857398c26ae00ee86ab6cb9810e435bf3cd5bbca62bd"} err="failed to get container status \"55fe0540558cc50239db857398c26ae00ee86ab6cb9810e435bf3cd5bbca62bd\": rpc error: code = NotFound desc = could not find container \"55fe0540558cc50239db857398c26ae00ee86ab6cb9810e435bf3cd5bbca62bd\": container with ID starting with 55fe0540558cc50239db857398c26ae00ee86ab6cb9810e435bf3cd5bbca62bd not found: ID does not exist" Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.485092 4703 scope.go:117] "RemoveContainer" containerID="b8d92087370ded67ba4280bfd2aa5187644882ee4769708ab79b8d45834ca8e9" Jan 30 12:44:54 crc kubenswrapper[4703]: E0130 12:44:54.485511 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8d92087370ded67ba4280bfd2aa5187644882ee4769708ab79b8d45834ca8e9\": container with ID starting with b8d92087370ded67ba4280bfd2aa5187644882ee4769708ab79b8d45834ca8e9 not found: ID does not exist" containerID="b8d92087370ded67ba4280bfd2aa5187644882ee4769708ab79b8d45834ca8e9" Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.485571 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8d92087370ded67ba4280bfd2aa5187644882ee4769708ab79b8d45834ca8e9"} err="failed to get container status \"b8d92087370ded67ba4280bfd2aa5187644882ee4769708ab79b8d45834ca8e9\": rpc error: code = NotFound desc = could not find container \"b8d92087370ded67ba4280bfd2aa5187644882ee4769708ab79b8d45834ca8e9\": container with ID starting with b8d92087370ded67ba4280bfd2aa5187644882ee4769708ab79b8d45834ca8e9 not found: ID does not exist" Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.485613 4703 scope.go:117] "RemoveContainer" containerID="3c1f2dc8722a6069f94420daee2468a71610e06c129ec6b914383d09341723f2" Jan 30 12:44:54 crc kubenswrapper[4703]: E0130 12:44:54.485884 4703 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"3c1f2dc8722a6069f94420daee2468a71610e06c129ec6b914383d09341723f2\": container with ID starting with 3c1f2dc8722a6069f94420daee2468a71610e06c129ec6b914383d09341723f2 not found: ID does not exist" containerID="3c1f2dc8722a6069f94420daee2468a71610e06c129ec6b914383d09341723f2" Jan 30 12:44:54 crc kubenswrapper[4703]: I0130 12:44:54.485934 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c1f2dc8722a6069f94420daee2468a71610e06c129ec6b914383d09341723f2"} err="failed to get container status \"3c1f2dc8722a6069f94420daee2468a71610e06c129ec6b914383d09341723f2\": rpc error: code = NotFound desc = could not find container \"3c1f2dc8722a6069f94420daee2468a71610e06c129ec6b914383d09341723f2\": container with ID starting with 3c1f2dc8722a6069f94420daee2468a71610e06c129ec6b914383d09341723f2 not found: ID does not exist" Jan 30 12:44:55 crc kubenswrapper[4703]: I0130 12:44:55.100285 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7" path="/var/lib/kubelet/pods/2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7/volumes" Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.180468 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp"] Jan 30 12:45:00 crc kubenswrapper[4703]: E0130 12:45:00.183409 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7" containerName="extract-utilities" Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.183465 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7" containerName="extract-utilities" Jan 30 12:45:00 crc kubenswrapper[4703]: E0130 12:45:00.183492 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7" containerName="registry-server" Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.183504 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7" containerName="registry-server" Jan 30 12:45:00 crc kubenswrapper[4703]: E0130 12:45:00.183557 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7" containerName="extract-content" Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.183566 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7" containerName="extract-content" Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.183863 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f6bd0c2-5eae-489c-8ad9-7c65ddf087f7" containerName="registry-server" Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.185145 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp" Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.190338 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.191661 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.219607 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp"] Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.294298 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/22d51638-ba9b-4627-b5dc-802683a96d22-config-volume\") pod \"collect-profiles-29496285-mzjhp\" (UID: \"22d51638-ba9b-4627-b5dc-802683a96d22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp" Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.295042 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrd4d\" (UniqueName: \"kubernetes.io/projected/22d51638-ba9b-4627-b5dc-802683a96d22-kube-api-access-vrd4d\") pod \"collect-profiles-29496285-mzjhp\" (UID: \"22d51638-ba9b-4627-b5dc-802683a96d22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp" Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.295833 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/22d51638-ba9b-4627-b5dc-802683a96d22-secret-volume\") pod \"collect-profiles-29496285-mzjhp\" (UID: \"22d51638-ba9b-4627-b5dc-802683a96d22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp" Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.398271 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/22d51638-ba9b-4627-b5dc-802683a96d22-secret-volume\") pod \"collect-profiles-29496285-mzjhp\" (UID: \"22d51638-ba9b-4627-b5dc-802683a96d22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp" Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.398341 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/22d51638-ba9b-4627-b5dc-802683a96d22-config-volume\") pod \"collect-profiles-29496285-mzjhp\" (UID: \"22d51638-ba9b-4627-b5dc-802683a96d22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp" Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.398386 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrd4d\" (UniqueName: \"kubernetes.io/projected/22d51638-ba9b-4627-b5dc-802683a96d22-kube-api-access-vrd4d\") pod \"collect-profiles-29496285-mzjhp\" (UID: \"22d51638-ba9b-4627-b5dc-802683a96d22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp" Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.399611 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/22d51638-ba9b-4627-b5dc-802683a96d22-config-volume\") pod 
\"collect-profiles-29496285-mzjhp\" (UID: \"22d51638-ba9b-4627-b5dc-802683a96d22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp" Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.418107 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/22d51638-ba9b-4627-b5dc-802683a96d22-secret-volume\") pod \"collect-profiles-29496285-mzjhp\" (UID: \"22d51638-ba9b-4627-b5dc-802683a96d22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp" Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.422968 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrd4d\" (UniqueName: \"kubernetes.io/projected/22d51638-ba9b-4627-b5dc-802683a96d22-kube-api-access-vrd4d\") pod \"collect-profiles-29496285-mzjhp\" (UID: \"22d51638-ba9b-4627-b5dc-802683a96d22\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp" Jan 30 12:45:00 crc kubenswrapper[4703]: I0130 12:45:00.526118 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp" Jan 30 12:45:01 crc kubenswrapper[4703]: I0130 12:45:01.080453 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp"] Jan 30 12:45:01 crc kubenswrapper[4703]: I0130 12:45:01.448074 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp" event={"ID":"22d51638-ba9b-4627-b5dc-802683a96d22","Type":"ContainerStarted","Data":"0e5ab7401d002f425b62fcf62cc68cce1220fdd5eb5d78372918df68740163d8"} Jan 30 12:45:01 crc kubenswrapper[4703]: I0130 12:45:01.449438 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp" event={"ID":"22d51638-ba9b-4627-b5dc-802683a96d22","Type":"ContainerStarted","Data":"a04189050a549fc6ae20b632a1a46b5b603e1f1fb17409925e56a21d08f258ff"} Jan 30 12:45:01 crc kubenswrapper[4703]: I0130 12:45:01.473658 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp" podStartSLOduration=1.473629705 podStartE2EDuration="1.473629705s" podCreationTimestamp="2026-01-30 12:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:45:01.467315398 +0000 UTC m=+2937.245137052" watchObservedRunningTime="2026-01-30 12:45:01.473629705 +0000 UTC m=+2937.251451359" Jan 30 12:45:01 crc kubenswrapper[4703]: E0130 12:45:01.771955 4703 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod22d51638_ba9b_4627_b5dc_802683a96d22.slice/crio-0e5ab7401d002f425b62fcf62cc68cce1220fdd5eb5d78372918df68740163d8.scope\": RecentStats: unable to find data in memory cache]" Jan 30 12:45:02 crc kubenswrapper[4703]: I0130 12:45:02.461831 4703 generic.go:334] "Generic (PLEG): container finished" podID="22d51638-ba9b-4627-b5dc-802683a96d22" containerID="0e5ab7401d002f425b62fcf62cc68cce1220fdd5eb5d78372918df68740163d8" exitCode=0 Jan 30 12:45:02 crc kubenswrapper[4703]: I0130 12:45:02.461888 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp" event={"ID":"22d51638-ba9b-4627-b5dc-802683a96d22","Type":"ContainerDied","Data":"0e5ab7401d002f425b62fcf62cc68cce1220fdd5eb5d78372918df68740163d8"} Jan 30 12:45:04 crc kubenswrapper[4703]: I0130 12:45:04.092179 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:45:04 crc kubenswrapper[4703]: E0130 12:45:04.092932 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:45:04 crc kubenswrapper[4703]: I0130 12:45:04.155684 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp" Jan 30 12:45:04 crc kubenswrapper[4703]: I0130 12:45:04.169271 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrd4d\" (UniqueName: \"kubernetes.io/projected/22d51638-ba9b-4627-b5dc-802683a96d22-kube-api-access-vrd4d\") pod \"22d51638-ba9b-4627-b5dc-802683a96d22\" (UID: \"22d51638-ba9b-4627-b5dc-802683a96d22\") " Jan 30 12:45:04 crc kubenswrapper[4703]: I0130 12:45:04.171280 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/22d51638-ba9b-4627-b5dc-802683a96d22-secret-volume\") pod \"22d51638-ba9b-4627-b5dc-802683a96d22\" (UID: \"22d51638-ba9b-4627-b5dc-802683a96d22\") " Jan 30 12:45:04 crc kubenswrapper[4703]: I0130 12:45:04.172178 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/22d51638-ba9b-4627-b5dc-802683a96d22-config-volume\") pod \"22d51638-ba9b-4627-b5dc-802683a96d22\" (UID: \"22d51638-ba9b-4627-b5dc-802683a96d22\") " Jan 30 12:45:04 crc kubenswrapper[4703]: I0130 12:45:04.173421 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22d51638-ba9b-4627-b5dc-802683a96d22-config-volume" (OuterVolumeSpecName: "config-volume") pod "22d51638-ba9b-4627-b5dc-802683a96d22" (UID: "22d51638-ba9b-4627-b5dc-802683a96d22"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:45:04 crc kubenswrapper[4703]: I0130 12:45:04.175106 4703 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/22d51638-ba9b-4627-b5dc-802683a96d22-config-volume\") on node \"crc\" DevicePath \"\"" Jan 30 12:45:04 crc kubenswrapper[4703]: I0130 12:45:04.188838 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22d51638-ba9b-4627-b5dc-802683a96d22-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "22d51638-ba9b-4627-b5dc-802683a96d22" (UID: "22d51638-ba9b-4627-b5dc-802683a96d22"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:45:04 crc kubenswrapper[4703]: I0130 12:45:04.196362 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22d51638-ba9b-4627-b5dc-802683a96d22-kube-api-access-vrd4d" (OuterVolumeSpecName: "kube-api-access-vrd4d") pod "22d51638-ba9b-4627-b5dc-802683a96d22" (UID: "22d51638-ba9b-4627-b5dc-802683a96d22"). InnerVolumeSpecName "kube-api-access-vrd4d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:45:04 crc kubenswrapper[4703]: I0130 12:45:04.277756 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrd4d\" (UniqueName: \"kubernetes.io/projected/22d51638-ba9b-4627-b5dc-802683a96d22-kube-api-access-vrd4d\") on node \"crc\" DevicePath \"\"" Jan 30 12:45:04 crc kubenswrapper[4703]: I0130 12:45:04.277811 4703 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/22d51638-ba9b-4627-b5dc-802683a96d22-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 30 12:45:04 crc kubenswrapper[4703]: I0130 12:45:04.486038 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp" event={"ID":"22d51638-ba9b-4627-b5dc-802683a96d22","Type":"ContainerDied","Data":"a04189050a549fc6ae20b632a1a46b5b603e1f1fb17409925e56a21d08f258ff"} Jan 30 12:45:04 crc kubenswrapper[4703]: I0130 12:45:04.486100 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a04189050a549fc6ae20b632a1a46b5b603e1f1fb17409925e56a21d08f258ff" Jan 30 12:45:04 crc kubenswrapper[4703]: I0130 12:45:04.486097 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496285-mzjhp" Jan 30 12:45:04 crc kubenswrapper[4703]: I0130 12:45:04.578158 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld"] Jan 30 12:45:04 crc kubenswrapper[4703]: I0130 12:45:04.588474 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496240-g78ld"] Jan 30 12:45:05 crc kubenswrapper[4703]: I0130 12:45:05.107049 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="178b5fda-ccd1-492b-9d7f-5a44efecaaac" path="/var/lib/kubelet/pods/178b5fda-ccd1-492b-9d7f-5a44efecaaac/volumes" Jan 30 12:45:12 crc kubenswrapper[4703]: I0130 12:45:12.823445 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:45:12 crc kubenswrapper[4703]: I0130 12:45:12.824433 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:45:12 crc kubenswrapper[4703]: I0130 12:45:12.824522 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 12:45:12 crc kubenswrapper[4703]: I0130 12:45:12.825730 4703 kuberuntime_manager.go:1027] "Message for Container 
of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f52cc783a86b634ff8736b30e24b266ea538324fc6801b094268d35989eedffc"} pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 12:45:12 crc kubenswrapper[4703]: I0130 12:45:12.825798 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" containerID="cri-o://f52cc783a86b634ff8736b30e24b266ea538324fc6801b094268d35989eedffc" gracePeriod=600 Jan 30 12:45:13 crc kubenswrapper[4703]: I0130 12:45:13.718005 4703 generic.go:334] "Generic (PLEG): container finished" podID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerID="f52cc783a86b634ff8736b30e24b266ea538324fc6801b094268d35989eedffc" exitCode=0 Jan 30 12:45:13 crc kubenswrapper[4703]: I0130 12:45:13.718102 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerDied","Data":"f52cc783a86b634ff8736b30e24b266ea538324fc6801b094268d35989eedffc"} Jan 30 12:45:13 crc kubenswrapper[4703]: I0130 12:45:13.718938 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerStarted","Data":"6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913"} Jan 30 12:45:13 crc kubenswrapper[4703]: I0130 12:45:13.718974 4703 scope.go:117] "RemoveContainer" containerID="23771203f5c69f7695415887ebf75c2a47cc46ab37ea5b83672a2c48b585dfa7" Jan 30 12:45:18 crc kubenswrapper[4703]: I0130 12:45:18.086442 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:45:18 crc kubenswrapper[4703]: E0130 12:45:18.087311 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:45:29 crc kubenswrapper[4703]: I0130 12:45:29.087076 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:45:29 crc kubenswrapper[4703]: E0130 12:45:29.088624 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:45:42 crc kubenswrapper[4703]: I0130 12:45:42.087813 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:45:42 crc kubenswrapper[4703]: E0130 12:45:42.089577 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler 
pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:45:54 crc kubenswrapper[4703]: I0130 12:45:54.595313 4703 scope.go:117] "RemoveContainer" containerID="afd2ce35861b11d0750c6803d3c3b6fff9e34ea4c8c679c9bee26432c00fe539" Jan 30 12:45:56 crc kubenswrapper[4703]: I0130 12:45:56.087119 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:45:56 crc kubenswrapper[4703]: E0130 12:45:56.088034 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:46:11 crc kubenswrapper[4703]: I0130 12:46:11.087607 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:46:11 crc kubenswrapper[4703]: I0130 12:46:11.648518 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerStarted","Data":"56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002"} Jan 30 12:46:14 crc kubenswrapper[4703]: I0130 12:46:14.686602 4703 generic.go:334] "Generic (PLEG): container finished" podID="2fc19a6b-3cde-4bb5-9499-f5be846289da" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" exitCode=1 Jan 30 12:46:14 crc kubenswrapper[4703]: I0130 12:46:14.686684 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerDied","Data":"56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002"} Jan 30 12:46:14 crc kubenswrapper[4703]: I0130 12:46:14.687472 4703 scope.go:117] "RemoveContainer" containerID="92f6c98f10257d9e3a4a0a1268bda035c910abf3e3f55b55c0037e4799494c29" Jan 30 12:46:14 crc kubenswrapper[4703]: I0130 12:46:14.688519 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:46:14 crc kubenswrapper[4703]: E0130 12:46:14.689054 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:46:16 crc kubenswrapper[4703]: I0130 12:46:16.035530 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:46:16 crc kubenswrapper[4703]: I0130 12:46:16.036071 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:46:16 crc kubenswrapper[4703]: I0130 12:46:16.036086 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:46:16 crc kubenswrapper[4703]: I0130 12:46:16.036099 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 30 12:46:16 crc kubenswrapper[4703]: I0130 12:46:16.037084 4703 scope.go:117] "RemoveContainer" 
containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:46:16 crc kubenswrapper[4703]: E0130 12:46:16.037418 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:46:29 crc kubenswrapper[4703]: I0130 12:46:29.087268 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:46:29 crc kubenswrapper[4703]: E0130 12:46:29.090280 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:46:37 crc kubenswrapper[4703]: I0130 12:46:37.244738 4703 generic.go:334] "Generic (PLEG): container finished" podID="0563aaa6-b1b3-411e-8ff0-4ef8986fa932" containerID="563e2f69f0173baefdbed58a23bb70adb04ceb46021ee484db9866a4f8d2dbfd" exitCode=0 Jan 30 12:46:37 crc kubenswrapper[4703]: I0130 12:46:37.245386 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" event={"ID":"0563aaa6-b1b3-411e-8ff0-4ef8986fa932","Type":"ContainerDied","Data":"563e2f69f0173baefdbed58a23bb70adb04ceb46021ee484db9866a4f8d2dbfd"} Jan 30 12:46:38 crc kubenswrapper[4703]: I0130 12:46:38.792863 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:46:38 crc kubenswrapper[4703]: I0130 12:46:38.914538 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rjmv7\" (UniqueName: \"kubernetes.io/projected/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-kube-api-access-rjmv7\") pod \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " Jan 30 12:46:38 crc kubenswrapper[4703]: I0130 12:46:38.914610 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ssh-key-openstack-edpm-ipam\") pod \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " Jan 30 12:46:38 crc kubenswrapper[4703]: I0130 12:46:38.914761 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-inventory\") pod \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " Jan 30 12:46:38 crc kubenswrapper[4703]: I0130 12:46:38.914792 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ceilometer-compute-config-data-0\") pod \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " Jan 30 12:46:38 crc kubenswrapper[4703]: I0130 12:46:38.914873 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ceilometer-compute-config-data-1\") pod \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " Jan 30 12:46:38 crc kubenswrapper[4703]: I0130 12:46:38.914961 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ceilometer-compute-config-data-2\") pod \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " Jan 30 12:46:38 crc kubenswrapper[4703]: I0130 12:46:38.915007 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-telemetry-combined-ca-bundle\") pod \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\" (UID: \"0563aaa6-b1b3-411e-8ff0-4ef8986fa932\") " Jan 30 12:46:38 crc kubenswrapper[4703]: I0130 12:46:38.923398 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "0563aaa6-b1b3-411e-8ff0-4ef8986fa932" (UID: "0563aaa6-b1b3-411e-8ff0-4ef8986fa932"). InnerVolumeSpecName "telemetry-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:46:38 crc kubenswrapper[4703]: I0130 12:46:38.924348 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-kube-api-access-rjmv7" (OuterVolumeSpecName: "kube-api-access-rjmv7") pod "0563aaa6-b1b3-411e-8ff0-4ef8986fa932" (UID: "0563aaa6-b1b3-411e-8ff0-4ef8986fa932"). InnerVolumeSpecName "kube-api-access-rjmv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:46:38 crc kubenswrapper[4703]: I0130 12:46:38.955691 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "0563aaa6-b1b3-411e-8ff0-4ef8986fa932" (UID: "0563aaa6-b1b3-411e-8ff0-4ef8986fa932"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:46:38 crc kubenswrapper[4703]: I0130 12:46:38.956973 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "0563aaa6-b1b3-411e-8ff0-4ef8986fa932" (UID: "0563aaa6-b1b3-411e-8ff0-4ef8986fa932"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:46:38 crc kubenswrapper[4703]: I0130 12:46:38.959031 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-inventory" (OuterVolumeSpecName: "inventory") pod "0563aaa6-b1b3-411e-8ff0-4ef8986fa932" (UID: "0563aaa6-b1b3-411e-8ff0-4ef8986fa932"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:46:38 crc kubenswrapper[4703]: I0130 12:46:38.962006 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "0563aaa6-b1b3-411e-8ff0-4ef8986fa932" (UID: "0563aaa6-b1b3-411e-8ff0-4ef8986fa932"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:46:38 crc kubenswrapper[4703]: I0130 12:46:38.972236 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "0563aaa6-b1b3-411e-8ff0-4ef8986fa932" (UID: "0563aaa6-b1b3-411e-8ff0-4ef8986fa932"). InnerVolumeSpecName "ceilometer-compute-config-data-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:46:39 crc kubenswrapper[4703]: I0130 12:46:39.017682 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rjmv7\" (UniqueName: \"kubernetes.io/projected/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-kube-api-access-rjmv7\") on node \"crc\" DevicePath \"\"" Jan 30 12:46:39 crc kubenswrapper[4703]: I0130 12:46:39.017723 4703 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 30 12:46:39 crc kubenswrapper[4703]: I0130 12:46:39.017734 4703 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-inventory\") on node \"crc\" DevicePath \"\"" Jan 30 12:46:39 crc kubenswrapper[4703]: I0130 12:46:39.017747 4703 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:46:39 crc kubenswrapper[4703]: I0130 12:46:39.017758 4703 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Jan 30 12:46:39 crc kubenswrapper[4703]: I0130 12:46:39.017767 4703 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Jan 30 12:46:39 crc kubenswrapper[4703]: I0130 12:46:39.017776 4703 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0563aaa6-b1b3-411e-8ff0-4ef8986fa932-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:46:39 crc kubenswrapper[4703]: I0130 12:46:39.269686 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" event={"ID":"0563aaa6-b1b3-411e-8ff0-4ef8986fa932","Type":"ContainerDied","Data":"c900c05d55fa9fb902444f918da79ea305e3f61394e18805fa4c75da7d3b92bc"} Jan 30 12:46:39 crc kubenswrapper[4703]: I0130 12:46:39.269754 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c900c05d55fa9fb902444f918da79ea305e3f61394e18805fa4c75da7d3b92bc" Jan 30 12:46:39 crc kubenswrapper[4703]: I0130 12:46:39.269834 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql" Jan 30 12:46:41 crc kubenswrapper[4703]: I0130 12:46:41.087794 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:46:41 crc kubenswrapper[4703]: E0130 12:46:41.088997 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:46:52 crc kubenswrapper[4703]: I0130 12:46:52.086770 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:46:52 crc kubenswrapper[4703]: E0130 12:46:52.088027 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:47:03 crc kubenswrapper[4703]: I0130 12:47:03.087526 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:47:03 crc kubenswrapper[4703]: E0130 12:47:03.088571 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:47:15 crc kubenswrapper[4703]: I0130 12:47:15.104296 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:47:15 crc kubenswrapper[4703]: E0130 12:47:15.106628 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:47:15 crc kubenswrapper[4703]: I0130 12:47:15.118990 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 30 12:47:15 crc kubenswrapper[4703]: I0130 12:47:15.119458 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerName="prometheus" containerID="cri-o://b93716e0d37b866137d69aaac757b996589738dfddc32139ac80f5192065268c" gracePeriod=600 Jan 30 12:47:15 crc kubenswrapper[4703]: I0130 12:47:15.119925 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerName="thanos-sidecar" containerID="cri-o://93898e666f9e06e253c6f5caa355ad757f959514951a03b165d07fe622904434" gracePeriod=600 Jan 30 12:47:15 crc kubenswrapper[4703]: I0130 12:47:15.119980 4703 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/prometheus-metric-storage-0" podUID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerName="config-reloader" containerID="cri-o://cbf2e3a5111beddb54852d3d864b33df39fe6e7ea9aa977e50c5c6a07a7b51b2" gracePeriod=600 Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.178646 4703 generic.go:334] "Generic (PLEG): container finished" podID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerID="93898e666f9e06e253c6f5caa355ad757f959514951a03b165d07fe622904434" exitCode=0 Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.179171 4703 generic.go:334] "Generic (PLEG): container finished" podID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerID="cbf2e3a5111beddb54852d3d864b33df39fe6e7ea9aa977e50c5c6a07a7b51b2" exitCode=0 Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.179182 4703 generic.go:334] "Generic (PLEG): container finished" podID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerID="b93716e0d37b866137d69aaac757b996589738dfddc32139ac80f5192065268c" exitCode=0 Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.179210 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7a919f37-730e-42a4-848f-ae5b2096b2d2","Type":"ContainerDied","Data":"93898e666f9e06e253c6f5caa355ad757f959514951a03b165d07fe622904434"} Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.179255 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7a919f37-730e-42a4-848f-ae5b2096b2d2","Type":"ContainerDied","Data":"cbf2e3a5111beddb54852d3d864b33df39fe6e7ea9aa977e50c5c6a07a7b51b2"} Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.179271 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7a919f37-730e-42a4-848f-ae5b2096b2d2","Type":"ContainerDied","Data":"b93716e0d37b866137d69aaac757b996589738dfddc32139ac80f5192065268c"} Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.179286 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"7a919f37-730e-42a4-848f-ae5b2096b2d2","Type":"ContainerDied","Data":"cadab602e9c98ba517bc13d059da63d64fc8ad6ba808760adb4cc4185a7d01cf"} Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.179299 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cadab602e9c98ba517bc13d059da63d64fc8ad6ba808760adb4cc4185a7d01cf" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.244585 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.324203 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7a919f37-730e-42a4-848f-ae5b2096b2d2-config-out\") pod \"7a919f37-730e-42a4-848f-ae5b2096b2d2\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.324627 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-secret-combined-ca-bundle\") pod \"7a919f37-730e-42a4-848f-ae5b2096b2d2\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.325015 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af\") pod \"7a919f37-730e-42a4-848f-ae5b2096b2d2\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.325137 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/7a919f37-730e-42a4-848f-ae5b2096b2d2-prometheus-metric-storage-rulefiles-1\") pod \"7a919f37-730e-42a4-848f-ae5b2096b2d2\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.325254 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"7a919f37-730e-42a4-848f-ae5b2096b2d2\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.325337 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/7a919f37-730e-42a4-848f-ae5b2096b2d2-prometheus-metric-storage-rulefiles-2\") pod \"7a919f37-730e-42a4-848f-ae5b2096b2d2\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.325437 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-config\") pod \"7a919f37-730e-42a4-848f-ae5b2096b2d2\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.325532 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-thanos-prometheus-http-client-file\") pod \"7a919f37-730e-42a4-848f-ae5b2096b2d2\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.325632 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/7a919f37-730e-42a4-848f-ae5b2096b2d2-prometheus-metric-storage-rulefiles-0\") pod \"7a919f37-730e-42a4-848f-ae5b2096b2d2\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " Jan 30 
12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.325724 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"7a919f37-730e-42a4-848f-ae5b2096b2d2\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.325833 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7a919f37-730e-42a4-848f-ae5b2096b2d2-tls-assets\") pod \"7a919f37-730e-42a4-848f-ae5b2096b2d2\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.325926 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-web-config\") pod \"7a919f37-730e-42a4-848f-ae5b2096b2d2\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.326034 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92fh8\" (UniqueName: \"kubernetes.io/projected/7a919f37-730e-42a4-848f-ae5b2096b2d2-kube-api-access-92fh8\") pod \"7a919f37-730e-42a4-848f-ae5b2096b2d2\" (UID: \"7a919f37-730e-42a4-848f-ae5b2096b2d2\") " Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.342484 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-secret-combined-ca-bundle" (OuterVolumeSpecName: "secret-combined-ca-bundle") pod "7a919f37-730e-42a4-848f-ae5b2096b2d2" (UID: "7a919f37-730e-42a4-848f-ae5b2096b2d2"). InnerVolumeSpecName "secret-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.360864 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a919f37-730e-42a4-848f-ae5b2096b2d2-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "7a919f37-730e-42a4-848f-ae5b2096b2d2" (UID: "7a919f37-730e-42a4-848f-ae5b2096b2d2"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.369356 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a919f37-730e-42a4-848f-ae5b2096b2d2-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "7a919f37-730e-42a4-848f-ae5b2096b2d2" (UID: "7a919f37-730e-42a4-848f-ae5b2096b2d2"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.372709 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a919f37-730e-42a4-848f-ae5b2096b2d2-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "7a919f37-730e-42a4-848f-ae5b2096b2d2" (UID: "7a919f37-730e-42a4-848f-ae5b2096b2d2"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.403499 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a919f37-730e-42a4-848f-ae5b2096b2d2-kube-api-access-92fh8" (OuterVolumeSpecName: "kube-api-access-92fh8") pod "7a919f37-730e-42a4-848f-ae5b2096b2d2" (UID: "7a919f37-730e-42a4-848f-ae5b2096b2d2"). InnerVolumeSpecName "kube-api-access-92fh8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.409959 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "7a919f37-730e-42a4-848f-ae5b2096b2d2" (UID: "7a919f37-730e-42a4-848f-ae5b2096b2d2"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.410332 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d") pod "7a919f37-730e-42a4-848f-ae5b2096b2d2" (UID: "7a919f37-730e-42a4-848f-ae5b2096b2d2"). InnerVolumeSpecName "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.420351 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-config" (OuterVolumeSpecName: "config") pod "7a919f37-730e-42a4-848f-ae5b2096b2d2" (UID: "7a919f37-730e-42a4-848f-ae5b2096b2d2"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.428303 4703 reconciler_common.go:293] "Volume detached for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-secret-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.428343 4703 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/7a919f37-730e-42a4-848f-ae5b2096b2d2-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\"" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.428356 4703 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/7a919f37-730e-42a4-848f-ae5b2096b2d2-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\"" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.428372 4703 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.428384 4703 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.428398 4703 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/7a919f37-730e-42a4-848f-ae5b2096b2d2-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.428410 4703 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") on node \"crc\" DevicePath \"\"" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.428424 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92fh8\" (UniqueName: \"kubernetes.io/projected/7a919f37-730e-42a4-848f-ae5b2096b2d2-kube-api-access-92fh8\") on node \"crc\" DevicePath \"\"" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.444402 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7a919f37-730e-42a4-848f-ae5b2096b2d2-config-out" (OuterVolumeSpecName: "config-out") pod "7a919f37-730e-42a4-848f-ae5b2096b2d2" (UID: "7a919f37-730e-42a4-848f-ae5b2096b2d2"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.444420 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d") pod "7a919f37-730e-42a4-848f-ae5b2096b2d2" (UID: "7a919f37-730e-42a4-848f-ae5b2096b2d2"). InnerVolumeSpecName "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.457512 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a919f37-730e-42a4-848f-ae5b2096b2d2-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "7a919f37-730e-42a4-848f-ae5b2096b2d2" (UID: "7a919f37-730e-42a4-848f-ae5b2096b2d2"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.530481 4703 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") on node \"crc\" DevicePath \"\"" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.530527 4703 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/7a919f37-730e-42a4-848f-ae5b2096b2d2-tls-assets\") on node \"crc\" DevicePath \"\"" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.530541 4703 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/7a919f37-730e-42a4-848f-ae5b2096b2d2-config-out\") on node \"crc\" DevicePath \"\"" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.622263 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-web-config" (OuterVolumeSpecName: "web-config") pod "7a919f37-730e-42a4-848f-ae5b2096b2d2" (UID: "7a919f37-730e-42a4-848f-ae5b2096b2d2"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.634039 4703 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/7a919f37-730e-42a4-848f-ae5b2096b2d2-web-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.688268 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "7a919f37-730e-42a4-848f-ae5b2096b2d2" (UID: "7a919f37-730e-42a4-848f-ae5b2096b2d2"). InnerVolumeSpecName "pvc-f44c6b57-502f-456c-b62d-7562ab4250af". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.735412 4703 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-f44c6b57-502f-456c-b62d-7562ab4250af\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af\") on node \"crc\" " Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.778374 4703 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.779174 4703 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-f44c6b57-502f-456c-b62d-7562ab4250af" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af") on node "crc" Jan 30 12:47:16 crc kubenswrapper[4703]: I0130 12:47:16.838271 4703 reconciler_common.go:293] "Volume detached for volume \"pvc-f44c6b57-502f-456c-b62d-7562ab4250af\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af\") on node \"crc\" DevicePath \"\"" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.191243 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.242171 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.255549 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.283402 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 30 12:47:17 crc kubenswrapper[4703]: E0130 12:47:17.284078 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerName="config-reloader" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.284114 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerName="config-reloader" Jan 30 12:47:17 crc kubenswrapper[4703]: E0130 12:47:17.284153 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0563aaa6-b1b3-411e-8ff0-4ef8986fa932" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.284162 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="0563aaa6-b1b3-411e-8ff0-4ef8986fa932" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 30 12:47:17 crc kubenswrapper[4703]: E0130 12:47:17.284174 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerName="prometheus" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.284182 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerName="prometheus" Jan 30 12:47:17 crc kubenswrapper[4703]: E0130 12:47:17.284195 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22d51638-ba9b-4627-b5dc-802683a96d22" containerName="collect-profiles" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.284201 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="22d51638-ba9b-4627-b5dc-802683a96d22" containerName="collect-profiles" Jan 30 12:47:17 crc kubenswrapper[4703]: E0130 12:47:17.284213 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerName="thanos-sidecar" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.284218 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerName="thanos-sidecar" Jan 30 12:47:17 crc kubenswrapper[4703]: E0130 12:47:17.284232 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerName="init-config-reloader" Jan 30 12:47:17 crc 
kubenswrapper[4703]: I0130 12:47:17.284239 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerName="init-config-reloader" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.284515 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerName="thanos-sidecar" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.284542 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="22d51638-ba9b-4627-b5dc-802683a96d22" containerName="collect-profiles" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.284552 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="0563aaa6-b1b3-411e-8ff0-4ef8986fa932" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.284564 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerName="prometheus" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.284573 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerName="config-reloader" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.286707 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.293578 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-f82pr" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.293853 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.293989 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.316332 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.316781 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.316911 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.317003 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.319968 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.330617 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.454574 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b250e200-23dc-4508-924c-a4a666aad8e5-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 
12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.454651 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b250e200-23dc-4508-924c-a4a666aad8e5-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.454681 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/b250e200-23dc-4508-924c-a4a666aad8e5-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.454722 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b250e200-23dc-4508-924c-a4a666aad8e5-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.454773 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-f44c6b57-502f-456c-b62d-7562ab4250af\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.454791 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b250e200-23dc-4508-924c-a4a666aad8e5-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.454820 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pgzr\" (UniqueName: \"kubernetes.io/projected/b250e200-23dc-4508-924c-a4a666aad8e5-kube-api-access-8pgzr\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.454882 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b250e200-23dc-4508-924c-a4a666aad8e5-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.454919 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/b250e200-23dc-4508-924c-a4a666aad8e5-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 
12:47:17.454938 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b250e200-23dc-4508-924c-a4a666aad8e5-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.454966 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b250e200-23dc-4508-924c-a4a666aad8e5-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.454984 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b250e200-23dc-4508-924c-a4a666aad8e5-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.455012 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b250e200-23dc-4508-924c-a4a666aad8e5-config\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.557433 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/b250e200-23dc-4508-924c-a4a666aad8e5-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.557498 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b250e200-23dc-4508-924c-a4a666aad8e5-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.557551 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b250e200-23dc-4508-924c-a4a666aad8e5-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.557574 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b250e200-23dc-4508-924c-a4a666aad8e5-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.557611 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b250e200-23dc-4508-924c-a4a666aad8e5-config\") pod \"prometheus-metric-storage-0\" (UID: 
\"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.557662 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b250e200-23dc-4508-924c-a4a666aad8e5-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.557694 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b250e200-23dc-4508-924c-a4a666aad8e5-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.557727 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/b250e200-23dc-4508-924c-a4a666aad8e5-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.557790 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b250e200-23dc-4508-924c-a4a666aad8e5-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.557860 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-f44c6b57-502f-456c-b62d-7562ab4250af\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.557885 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b250e200-23dc-4508-924c-a4a666aad8e5-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.557935 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pgzr\" (UniqueName: \"kubernetes.io/projected/b250e200-23dc-4508-924c-a4a666aad8e5-kube-api-access-8pgzr\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.558044 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b250e200-23dc-4508-924c-a4a666aad8e5-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 
12:47:17.558738 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/b250e200-23dc-4508-924c-a4a666aad8e5-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.558770 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b250e200-23dc-4508-924c-a4a666aad8e5-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.559739 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/b250e200-23dc-4508-924c-a4a666aad8e5-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.564042 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b250e200-23dc-4508-924c-a4a666aad8e5-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.565803 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b250e200-23dc-4508-924c-a4a666aad8e5-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.566019 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b250e200-23dc-4508-924c-a4a666aad8e5-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.566419 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b250e200-23dc-4508-924c-a4a666aad8e5-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.568376 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b250e200-23dc-4508-924c-a4a666aad8e5-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.568917 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: 
\"kubernetes.io/projected/b250e200-23dc-4508-924c-a4a666aad8e5-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.570432 4703 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.570471 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-f44c6b57-502f-456c-b62d-7562ab4250af\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/e923a3fbd1684cd164e99ff35e919b68d893d89a026a0f736ef548b3af68c494/globalmount\"" pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.578923 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b250e200-23dc-4508-924c-a4a666aad8e5-config\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.579406 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b250e200-23dc-4508-924c-a4a666aad8e5-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.587943 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pgzr\" (UniqueName: \"kubernetes.io/projected/b250e200-23dc-4508-924c-a4a666aad8e5-kube-api-access-8pgzr\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.616573 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-f44c6b57-502f-456c-b62d-7562ab4250af\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-f44c6b57-502f-456c-b62d-7562ab4250af\") pod \"prometheus-metric-storage-0\" (UID: \"b250e200-23dc-4508-924c-a4a666aad8e5\") " pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.730508 4703 util.go:30] "No sandbox for pod can be found. 
Jan 30 12:47:17 crc kubenswrapper[4703]: I0130 12:47:17.730508 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 30 12:47:18 crc kubenswrapper[4703]: I0130 12:47:18.281323 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 30 12:47:19 crc kubenswrapper[4703]: I0130 12:47:19.102720 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a919f37-730e-42a4-848f-ae5b2096b2d2" path="/var/lib/kubelet/pods/7a919f37-730e-42a4-848f-ae5b2096b2d2/volumes"
Jan 30 12:47:19 crc kubenswrapper[4703]: I0130 12:47:19.153379 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="7a919f37-730e-42a4-848f-ae5b2096b2d2" containerName="prometheus" probeResult="failure" output="Get \"https://10.217.0.135:9090/-/ready\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 30 12:47:19 crc kubenswrapper[4703]: I0130 12:47:19.212613 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b250e200-23dc-4508-924c-a4a666aad8e5","Type":"ContainerStarted","Data":"7a7d46504fb4164fc3dd79068dd834dbfa3477c7487d4dd83517f8c0ec6d8b7a"}
Jan 30 12:47:23 crc kubenswrapper[4703]: I0130 12:47:23.268268 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b250e200-23dc-4508-924c-a4a666aad8e5","Type":"ContainerStarted","Data":"d780eee2cf64c73d85598405de67b9e507becd2cca201d6af85ad26bf3c862e0"}
Jan 30 12:47:28 crc kubenswrapper[4703]: I0130 12:47:28.093384 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002"
Jan 30 12:47:28 crc kubenswrapper[4703]: E0130 12:47:28.094581 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:47:33 crc kubenswrapper[4703]: I0130 12:47:33.426674 4703 generic.go:334] "Generic (PLEG): container finished" podID="b250e200-23dc-4508-924c-a4a666aad8e5" containerID="d780eee2cf64c73d85598405de67b9e507becd2cca201d6af85ad26bf3c862e0" exitCode=0
Jan 30 12:47:33 crc kubenswrapper[4703]: I0130 12:47:33.426809 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b250e200-23dc-4508-924c-a4a666aad8e5","Type":"ContainerDied","Data":"d780eee2cf64c73d85598405de67b9e507becd2cca201d6af85ad26bf3c862e0"}
Jan 30 12:47:34 crc kubenswrapper[4703]: I0130 12:47:34.444672 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b250e200-23dc-4508-924c-a4a666aad8e5","Type":"ContainerStarted","Data":"4c8c594231682ab31d8784af41c40918a29e26c3cf31dcb99699f2281cfe6307"}
Jan 30 12:47:38 crc kubenswrapper[4703]: I0130 12:47:38.492951 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b250e200-23dc-4508-924c-a4a666aad8e5","Type":"ContainerStarted","Data":"189e25b2bd34bc6b083ff0d3e97342d4e4fa3c965b7752e8ad79facfa39f01d6"}
Jan 30 12:47:38 crc kubenswrapper[4703]: I0130 12:47:38.493485 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b250e200-23dc-4508-924c-a4a666aad8e5","Type":"ContainerStarted","Data":"b738afd0e3b2d30f841af49b0faed73bcc4417594cef930d65d98fddcaadb4af"}
event={"ID":"b250e200-23dc-4508-924c-a4a666aad8e5","Type":"ContainerStarted","Data":"b738afd0e3b2d30f841af49b0faed73bcc4417594cef930d65d98fddcaadb4af"} Jan 30 12:47:38 crc kubenswrapper[4703]: I0130 12:47:38.555317 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=21.55527701 podStartE2EDuration="21.55527701s" podCreationTimestamp="2026-01-30 12:47:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 12:47:38.546136158 +0000 UTC m=+3094.323957812" watchObservedRunningTime="2026-01-30 12:47:38.55527701 +0000 UTC m=+3094.333098704" Jan 30 12:47:42 crc kubenswrapper[4703]: I0130 12:47:42.086810 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:47:42 crc kubenswrapper[4703]: E0130 12:47:42.087947 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:47:42 crc kubenswrapper[4703]: I0130 12:47:42.731410 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:42 crc kubenswrapper[4703]: I0130 12:47:42.823728 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:47:42 crc kubenswrapper[4703]: I0130 12:47:42.823828 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:47:47 crc kubenswrapper[4703]: I0130 12:47:47.731775 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:47 crc kubenswrapper[4703]: I0130 12:47:47.739378 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:48 crc kubenswrapper[4703]: I0130 12:47:48.596096 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 30 12:47:54 crc kubenswrapper[4703]: I0130 12:47:54.086816 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:47:54 crc kubenswrapper[4703]: E0130 12:47:54.087954 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:47:54 crc kubenswrapper[4703]: I0130 12:47:54.710101 4703 scope.go:117] "RemoveContainer" 
containerID="b93716e0d37b866137d69aaac757b996589738dfddc32139ac80f5192065268c" Jan 30 12:47:54 crc kubenswrapper[4703]: I0130 12:47:54.740822 4703 scope.go:117] "RemoveContainer" containerID="cbf2e3a5111beddb54852d3d864b33df39fe6e7ea9aa977e50c5c6a07a7b51b2" Jan 30 12:47:54 crc kubenswrapper[4703]: I0130 12:47:54.775676 4703 scope.go:117] "RemoveContainer" containerID="93898e666f9e06e253c6f5caa355ad757f959514951a03b165d07fe622904434" Jan 30 12:47:54 crc kubenswrapper[4703]: I0130 12:47:54.805019 4703 scope.go:117] "RemoveContainer" containerID="10e0bcb02fd9e626b5845d848fc0133efa1c70a29ef4dadf9b2cbb9c1160d864" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.125894 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.128561 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.135419 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.135445 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-6lwz6" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.135653 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.137347 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.150861 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.177098 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/01f70e75-8739-4f5d-bd1b-c064f21b1565-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.177215 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/01f70e75-8739-4f5d-bd1b-c064f21b1565-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.177237 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/01f70e75-8739-4f5d-bd1b-c064f21b1565-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.177261 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.177323 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"kube-api-access-564t9\" (UniqueName: \"kubernetes.io/projected/01f70e75-8739-4f5d-bd1b-c064f21b1565-kube-api-access-564t9\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.177383 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/01f70e75-8739-4f5d-bd1b-c064f21b1565-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.177430 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/01f70e75-8739-4f5d-bd1b-c064f21b1565-config-data\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.177446 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/01f70e75-8739-4f5d-bd1b-c064f21b1565-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.177466 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/01f70e75-8739-4f5d-bd1b-c064f21b1565-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.278881 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/01f70e75-8739-4f5d-bd1b-c064f21b1565-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.278973 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/01f70e75-8739-4f5d-bd1b-c064f21b1565-config-data\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.278994 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/01f70e75-8739-4f5d-bd1b-c064f21b1565-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.279979 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/01f70e75-8739-4f5d-bd1b-c064f21b1565-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.280271 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: 
\"kubernetes.io/secret/01f70e75-8739-4f5d-bd1b-c064f21b1565-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.280358 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/01f70e75-8739-4f5d-bd1b-c064f21b1565-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.280377 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/01f70e75-8739-4f5d-bd1b-c064f21b1565-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.280398 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.280470 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-564t9\" (UniqueName: \"kubernetes.io/projected/01f70e75-8739-4f5d-bd1b-c064f21b1565-kube-api-access-564t9\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.280672 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/01f70e75-8739-4f5d-bd1b-c064f21b1565-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.280917 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.280984 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/01f70e75-8739-4f5d-bd1b-c064f21b1565-config-data\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.281227 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/01f70e75-8739-4f5d-bd1b-c064f21b1565-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.281592 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/01f70e75-8739-4f5d-bd1b-c064f21b1565-openstack-config\") pod \"tempest-tests-tempest\" 
(UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.286006 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/01f70e75-8739-4f5d-bd1b-c064f21b1565-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.286381 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/01f70e75-8739-4f5d-bd1b-c064f21b1565-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.291940 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/01f70e75-8739-4f5d-bd1b-c064f21b1565-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.301601 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-564t9\" (UniqueName: \"kubernetes.io/projected/01f70e75-8739-4f5d-bd1b-c064f21b1565-kube-api-access-564t9\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.313583 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") " pod="openstack/tempest-tests-tempest" Jan 30 12:48:06 crc kubenswrapper[4703]: I0130 12:48:06.476070 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 30 12:48:07 crc kubenswrapper[4703]: I0130 12:48:07.024078 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Jan 30 12:48:07 crc kubenswrapper[4703]: W0130 12:48:07.029987 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod01f70e75_8739_4f5d_bd1b_c064f21b1565.slice/crio-4e35dc687b7ecfaee2a229a738f968bb38fc844705c5afea86fc3400d1d6e4de WatchSource:0}: Error finding container 4e35dc687b7ecfaee2a229a738f968bb38fc844705c5afea86fc3400d1d6e4de: Status 404 returned error can't find the container with id 4e35dc687b7ecfaee2a229a738f968bb38fc844705c5afea86fc3400d1d6e4de Jan 30 12:48:07 crc kubenswrapper[4703]: I0130 12:48:07.034172 4703 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 30 12:48:07 crc kubenswrapper[4703]: I0130 12:48:07.230241 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"01f70e75-8739-4f5d-bd1b-c064f21b1565","Type":"ContainerStarted","Data":"4e35dc687b7ecfaee2a229a738f968bb38fc844705c5afea86fc3400d1d6e4de"} Jan 30 12:48:08 crc kubenswrapper[4703]: I0130 12:48:08.087060 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:48:08 crc kubenswrapper[4703]: E0130 12:48:08.087904 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:48:12 crc kubenswrapper[4703]: I0130 12:48:12.824919 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:48:12 crc kubenswrapper[4703]: I0130 12:48:12.825856 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:48:19 crc kubenswrapper[4703]: I0130 12:48:19.087255 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:48:19 crc kubenswrapper[4703]: E0130 12:48:19.088240 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:48:21 crc kubenswrapper[4703]: I0130 12:48:21.421139 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"01f70e75-8739-4f5d-bd1b-c064f21b1565","Type":"ContainerStarted","Data":"e0c99fbea20116b281e36ba428d38303c984e5af8893185550745082710817bd"} Jan 30 12:48:21 crc kubenswrapper[4703]: I0130 
12:48:21.449299 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=3.548628976 podStartE2EDuration="16.449265584s" podCreationTimestamp="2026-01-30 12:48:05 +0000 UTC" firstStartedPulling="2026-01-30 12:48:07.033924775 +0000 UTC m=+3122.811746429" lastFinishedPulling="2026-01-30 12:48:19.934561393 +0000 UTC m=+3135.712383037" observedRunningTime="2026-01-30 12:48:21.44156206 +0000 UTC m=+3137.219383714" watchObservedRunningTime="2026-01-30 12:48:21.449265584 +0000 UTC m=+3137.227087238"
Jan 30 12:48:30 crc kubenswrapper[4703]: I0130 12:48:30.086956 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002"
Jan 30 12:48:30 crc kubenswrapper[4703]: E0130 12:48:30.088262 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:48:38 crc kubenswrapper[4703]: I0130 12:48:38.625779 4703 generic.go:334] "Generic (PLEG): container finished" podID="01f70e75-8739-4f5d-bd1b-c064f21b1565" containerID="e0c99fbea20116b281e36ba428d38303c984e5af8893185550745082710817bd" exitCode=123
Jan 30 12:48:38 crc kubenswrapper[4703]: I0130 12:48:38.625851 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"01f70e75-8739-4f5d-bd1b-c064f21b1565","Type":"ContainerDied","Data":"e0c99fbea20116b281e36ba428d38303c984e5af8893185550745082710817bd"}
Jan 30 12:48:38 crc kubenswrapper[4703]: E0130 12:48:38.736301 4703 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod01f70e75_8739_4f5d_bd1b_c064f21b1565.slice/crio-conmon-e0c99fbea20116b281e36ba428d38303c984e5af8893185550745082710817bd.scope\": RecentStats: unable to find data in memory cache]"
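pod_startup_latency_tracker reports two figures: podStartE2EDuration is observedRunningTime minus podCreationTimestamp, while podStartSLOduration additionally subtracts the time spent pulling images (firstStartedPulling through lastFinishedPulling), consistent with the upstream pod-startup SLI, which excludes image pulls. The earlier prometheus-metric-storage-0 entry had zero-valued pull timestamps, so its two figures were equal; for tempest-tests-tempest above, the pull took about 12.9s. A check of that arithmetic using the monotonic offsets (the m=+... values) from the entry:

    package main

    import "fmt"

    func main() {
        // Monotonic clock offsets copied from the log entry above.
        firstStartedPulling := 3122.811746429 // seconds since kubelet start
        lastFinishedPulling := 3135.712383037
        e2e := 16.449265584 // podStartE2EDuration in seconds

        pull := lastFinishedPulling - firstStartedPulling // ~12.900636608s of image pulling
        fmt.Printf("image pull: %.9fs\n", pull)
        fmt.Printf("E2E minus pull: %.9fs\n", e2e-pull) // ~3.548628976s, the logged podStartSLOduration
    }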
Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.094667 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.141703 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/01f70e75-8739-4f5d-bd1b-c064f21b1565-ca-certs\") pod \"01f70e75-8739-4f5d-bd1b-c064f21b1565\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") "
Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.142363 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/01f70e75-8739-4f5d-bd1b-c064f21b1565-config-data\") pod \"01f70e75-8739-4f5d-bd1b-c064f21b1565\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") "
Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.142427 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/01f70e75-8739-4f5d-bd1b-c064f21b1565-test-operator-ephemeral-temporary\") pod \"01f70e75-8739-4f5d-bd1b-c064f21b1565\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") "
Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.142548 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"01f70e75-8739-4f5d-bd1b-c064f21b1565\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") "
Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.142636 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/01f70e75-8739-4f5d-bd1b-c064f21b1565-openstack-config\") pod \"01f70e75-8739-4f5d-bd1b-c064f21b1565\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") "
Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.142751 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/01f70e75-8739-4f5d-bd1b-c064f21b1565-openstack-config-secret\") pod \"01f70e75-8739-4f5d-bd1b-c064f21b1565\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") "
Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.142780 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/01f70e75-8739-4f5d-bd1b-c064f21b1565-test-operator-ephemeral-workdir\") pod \"01f70e75-8739-4f5d-bd1b-c064f21b1565\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") "
Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.142803 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-564t9\" (UniqueName: \"kubernetes.io/projected/01f70e75-8739-4f5d-bd1b-c064f21b1565-kube-api-access-564t9\") pod \"01f70e75-8739-4f5d-bd1b-c064f21b1565\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") "
Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.142832 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/01f70e75-8739-4f5d-bd1b-c064f21b1565-ssh-key\") pod \"01f70e75-8739-4f5d-bd1b-c064f21b1565\" (UID: \"01f70e75-8739-4f5d-bd1b-c064f21b1565\") "
Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.146831 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01f70e75-8739-4f5d-bd1b-c064f21b1565-config-data" (OuterVolumeSpecName: "config-data") pod 
"01f70e75-8739-4f5d-bd1b-c064f21b1565" (UID: "01f70e75-8739-4f5d-bd1b-c064f21b1565"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.147545 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01f70e75-8739-4f5d-bd1b-c064f21b1565-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "01f70e75-8739-4f5d-bd1b-c064f21b1565" (UID: "01f70e75-8739-4f5d-bd1b-c064f21b1565"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.162070 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "test-operator-logs") pod "01f70e75-8739-4f5d-bd1b-c064f21b1565" (UID: "01f70e75-8739-4f5d-bd1b-c064f21b1565"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.163245 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01f70e75-8739-4f5d-bd1b-c064f21b1565-kube-api-access-564t9" (OuterVolumeSpecName: "kube-api-access-564t9") pod "01f70e75-8739-4f5d-bd1b-c064f21b1565" (UID: "01f70e75-8739-4f5d-bd1b-c064f21b1565"). InnerVolumeSpecName "kube-api-access-564t9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.190349 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01f70e75-8739-4f5d-bd1b-c064f21b1565-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "01f70e75-8739-4f5d-bd1b-c064f21b1565" (UID: "01f70e75-8739-4f5d-bd1b-c064f21b1565"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.198745 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01f70e75-8739-4f5d-bd1b-c064f21b1565-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "01f70e75-8739-4f5d-bd1b-c064f21b1565" (UID: "01f70e75-8739-4f5d-bd1b-c064f21b1565"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.216265 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01f70e75-8739-4f5d-bd1b-c064f21b1565-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "01f70e75-8739-4f5d-bd1b-c064f21b1565" (UID: "01f70e75-8739-4f5d-bd1b-c064f21b1565"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.226715 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01f70e75-8739-4f5d-bd1b-c064f21b1565-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "01f70e75-8739-4f5d-bd1b-c064f21b1565" (UID: "01f70e75-8739-4f5d-bd1b-c064f21b1565"). InnerVolumeSpecName "openstack-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.239923 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/01f70e75-8739-4f5d-bd1b-c064f21b1565-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "01f70e75-8739-4f5d-bd1b-c064f21b1565" (UID: "01f70e75-8739-4f5d-bd1b-c064f21b1565"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.246476 4703 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/01f70e75-8739-4f5d-bd1b-c064f21b1565-ca-certs\") on node \"crc\" DevicePath \"\"" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.246553 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/01f70e75-8739-4f5d-bd1b-c064f21b1565-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.246589 4703 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/01f70e75-8739-4f5d-bd1b-c064f21b1565-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.246662 4703 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.246677 4703 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/01f70e75-8739-4f5d-bd1b-c064f21b1565-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.246688 4703 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/01f70e75-8739-4f5d-bd1b-c064f21b1565-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.246701 4703 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/01f70e75-8739-4f5d-bd1b-c064f21b1565-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.246710 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-564t9\" (UniqueName: \"kubernetes.io/projected/01f70e75-8739-4f5d-bd1b-c064f21b1565-kube-api-access-564t9\") on node \"crc\" DevicePath \"\"" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.246719 4703 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/01f70e75-8739-4f5d-bd1b-c064f21b1565-ssh-key\") on node \"crc\" DevicePath \"\"" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.270808 4703 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.349069 4703 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.672998 4703 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"01f70e75-8739-4f5d-bd1b-c064f21b1565","Type":"ContainerDied","Data":"4e35dc687b7ecfaee2a229a738f968bb38fc844705c5afea86fc3400d1d6e4de"} Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.673047 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e35dc687b7ecfaee2a229a738f968bb38fc844705c5afea86fc3400d1d6e4de" Jan 30 12:48:40 crc kubenswrapper[4703]: I0130 12:48:40.673092 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Jan 30 12:48:41 crc kubenswrapper[4703]: I0130 12:48:41.088451 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:48:41 crc kubenswrapper[4703]: E0130 12:48:41.089283 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:48:42 crc kubenswrapper[4703]: I0130 12:48:42.822468 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:48:42 crc kubenswrapper[4703]: I0130 12:48:42.822544 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:48:42 crc kubenswrapper[4703]: I0130 12:48:42.822602 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 12:48:42 crc kubenswrapper[4703]: I0130 12:48:42.824563 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913"} pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 12:48:42 crc kubenswrapper[4703]: I0130 12:48:42.824692 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" containerID="cri-o://6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" gracePeriod=600 Jan 30 12:48:42 crc kubenswrapper[4703]: E0130 12:48:42.957904 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:48:43 crc 
Jan 30 12:48:43 crc kubenswrapper[4703]: I0130 12:48:43.716858 4703 generic.go:334] "Generic (PLEG): container finished" podID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" exitCode=0
Jan 30 12:48:43 crc kubenswrapper[4703]: I0130 12:48:43.716922 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerDied","Data":"6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913"}
Jan 30 12:48:43 crc kubenswrapper[4703]: I0130 12:48:43.716989 4703 scope.go:117] "RemoveContainer" containerID="f52cc783a86b634ff8736b30e24b266ea538324fc6801b094268d35989eedffc"
Jan 30 12:48:43 crc kubenswrapper[4703]: I0130 12:48:43.718166 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913"
Jan 30 12:48:43 crc kubenswrapper[4703]: E0130 12:48:43.718580 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:48:49 crc kubenswrapper[4703]: E0130 12:48:49.031486 4703 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/rpm-ostreed.service\": RecentStats: unable to find data in memory cache]"
Jan 30 12:48:51 crc kubenswrapper[4703]: I0130 12:48:51.145439 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Jan 30 12:48:51 crc kubenswrapper[4703]: E0130 12:48:51.146135 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01f70e75-8739-4f5d-bd1b-c064f21b1565" containerName="tempest-tests-tempest-tests-runner"
Jan 30 12:48:51 crc kubenswrapper[4703]: I0130 12:48:51.146156 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="01f70e75-8739-4f5d-bd1b-c064f21b1565" containerName="tempest-tests-tempest-tests-runner"
Jan 30 12:48:51 crc kubenswrapper[4703]: I0130 12:48:51.146429 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="01f70e75-8739-4f5d-bd1b-c064f21b1565" containerName="tempest-tests-tempest-tests-runner"
Jan 30 12:48:51 crc kubenswrapper[4703]: I0130 12:48:51.147581 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 30 12:48:51 crc kubenswrapper[4703]: I0130 12:48:51.150634 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-6lwz6" Jan 30 12:48:51 crc kubenswrapper[4703]: I0130 12:48:51.157650 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Jan 30 12:48:51 crc kubenswrapper[4703]: I0130 12:48:51.335742 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9kjl6\" (UniqueName: \"kubernetes.io/projected/546bd0aa-e89f-4f5f-bd6a-36a8a79f5f7e-kube-api-access-9kjl6\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"546bd0aa-e89f-4f5f-bd6a-36a8a79f5f7e\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 30 12:48:51 crc kubenswrapper[4703]: I0130 12:48:51.335846 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"546bd0aa-e89f-4f5f-bd6a-36a8a79f5f7e\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 30 12:48:51 crc kubenswrapper[4703]: I0130 12:48:51.438667 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9kjl6\" (UniqueName: \"kubernetes.io/projected/546bd0aa-e89f-4f5f-bd6a-36a8a79f5f7e-kube-api-access-9kjl6\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"546bd0aa-e89f-4f5f-bd6a-36a8a79f5f7e\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 30 12:48:51 crc kubenswrapper[4703]: I0130 12:48:51.438773 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"546bd0aa-e89f-4f5f-bd6a-36a8a79f5f7e\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 30 12:48:51 crc kubenswrapper[4703]: I0130 12:48:51.439389 4703 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"546bd0aa-e89f-4f5f-bd6a-36a8a79f5f7e\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 30 12:48:51 crc kubenswrapper[4703]: I0130 12:48:51.463144 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9kjl6\" (UniqueName: \"kubernetes.io/projected/546bd0aa-e89f-4f5f-bd6a-36a8a79f5f7e-kube-api-access-9kjl6\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"546bd0aa-e89f-4f5f-bd6a-36a8a79f5f7e\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 30 12:48:51 crc kubenswrapper[4703]: I0130 12:48:51.467858 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"546bd0aa-e89f-4f5f-bd6a-36a8a79f5f7e\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 30 12:48:51 crc 
kubenswrapper[4703]: I0130 12:48:51.484019 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Jan 30 12:48:52 crc kubenswrapper[4703]: I0130 12:48:52.011629 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Jan 30 12:48:52 crc kubenswrapper[4703]: W0130 12:48:52.013186 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod546bd0aa_e89f_4f5f_bd6a_36a8a79f5f7e.slice/crio-cbf6eecc59b3ddcd51d1fe9765e976244af6e9073c2e9e2f80d2aa8c6a5ff287 WatchSource:0}: Error finding container cbf6eecc59b3ddcd51d1fe9765e976244af6e9073c2e9e2f80d2aa8c6a5ff287: Status 404 returned error can't find the container with id cbf6eecc59b3ddcd51d1fe9765e976244af6e9073c2e9e2f80d2aa8c6a5ff287 Jan 30 12:48:52 crc kubenswrapper[4703]: I0130 12:48:52.816093 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"546bd0aa-e89f-4f5f-bd6a-36a8a79f5f7e","Type":"ContainerStarted","Data":"cbf6eecc59b3ddcd51d1fe9765e976244af6e9073c2e9e2f80d2aa8c6a5ff287"} Jan 30 12:48:54 crc kubenswrapper[4703]: I0130 12:48:54.841979 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"546bd0aa-e89f-4f5f-bd6a-36a8a79f5f7e","Type":"ContainerStarted","Data":"e2fbfc5452f139d1f862b032884581832cfb809b6195675adf3212fc4ad6f80e"} Jan 30 12:48:54 crc kubenswrapper[4703]: I0130 12:48:54.868452 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.081217656 podStartE2EDuration="3.868427296s" podCreationTimestamp="2026-01-30 12:48:51 +0000 UTC" firstStartedPulling="2026-01-30 12:48:52.017077095 +0000 UTC m=+3167.794898749" lastFinishedPulling="2026-01-30 12:48:53.804286735 +0000 UTC m=+3169.582108389" observedRunningTime="2026-01-30 12:48:54.860613899 +0000 UTC m=+3170.638435573" watchObservedRunningTime="2026-01-30 12:48:54.868427296 +0000 UTC m=+3170.646248940" Jan 30 12:48:55 crc kubenswrapper[4703]: I0130 12:48:55.094350 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:48:55 crc kubenswrapper[4703]: E0130 12:48:55.094625 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:48:59 crc kubenswrapper[4703]: I0130 12:48:59.086670 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:48:59 crc kubenswrapper[4703]: E0130 12:48:59.087717 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:49:10 crc 
Jan 30 12:49:10 crc kubenswrapper[4703]: I0130 12:49:10.087979 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002"
Jan 30 12:49:10 crc kubenswrapper[4703]: E0130 12:49:10.088826 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:49:11 crc kubenswrapper[4703]: I0130 12:49:11.087071 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913"
Jan 30 12:49:11 crc kubenswrapper[4703]: E0130 12:49:11.087939 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:49:21 crc kubenswrapper[4703]: I0130 12:49:21.086825 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002"
Jan 30 12:49:21 crc kubenswrapper[4703]: E0130 12:49:21.087938 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:49:24 crc kubenswrapper[4703]: I0130 12:49:24.087455 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913"
Jan 30 12:49:24 crc kubenswrapper[4703]: E0130 12:49:24.089652 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:49:31 crc kubenswrapper[4703]: I0130 12:49:31.634020 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-z4rlc/must-gather-gdfpw"]
Jan 30 12:49:31 crc kubenswrapper[4703]: I0130 12:49:31.649319 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-z4rlc/must-gather-gdfpw"
Need to start a new one" pod="openshift-must-gather-z4rlc/must-gather-gdfpw" Jan 30 12:49:31 crc kubenswrapper[4703]: I0130 12:49:31.677629 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-z4rlc"/"openshift-service-ca.crt" Jan 30 12:49:31 crc kubenswrapper[4703]: I0130 12:49:31.682177 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-z4rlc"/"kube-root-ca.crt" Jan 30 12:49:31 crc kubenswrapper[4703]: I0130 12:49:31.684737 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-z4rlc/must-gather-gdfpw"] Jan 30 12:49:31 crc kubenswrapper[4703]: I0130 12:49:31.795113 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/7c916403-f0bc-47c8-bd85-d9977a5d9fa8-must-gather-output\") pod \"must-gather-gdfpw\" (UID: \"7c916403-f0bc-47c8-bd85-d9977a5d9fa8\") " pod="openshift-must-gather-z4rlc/must-gather-gdfpw" Jan 30 12:49:31 crc kubenswrapper[4703]: I0130 12:49:31.795414 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-458g8\" (UniqueName: \"kubernetes.io/projected/7c916403-f0bc-47c8-bd85-d9977a5d9fa8-kube-api-access-458g8\") pod \"must-gather-gdfpw\" (UID: \"7c916403-f0bc-47c8-bd85-d9977a5d9fa8\") " pod="openshift-must-gather-z4rlc/must-gather-gdfpw" Jan 30 12:49:31 crc kubenswrapper[4703]: I0130 12:49:31.898098 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/7c916403-f0bc-47c8-bd85-d9977a5d9fa8-must-gather-output\") pod \"must-gather-gdfpw\" (UID: \"7c916403-f0bc-47c8-bd85-d9977a5d9fa8\") " pod="openshift-must-gather-z4rlc/must-gather-gdfpw" Jan 30 12:49:31 crc kubenswrapper[4703]: I0130 12:49:31.898244 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-458g8\" (UniqueName: \"kubernetes.io/projected/7c916403-f0bc-47c8-bd85-d9977a5d9fa8-kube-api-access-458g8\") pod \"must-gather-gdfpw\" (UID: \"7c916403-f0bc-47c8-bd85-d9977a5d9fa8\") " pod="openshift-must-gather-z4rlc/must-gather-gdfpw" Jan 30 12:49:31 crc kubenswrapper[4703]: I0130 12:49:31.898539 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/7c916403-f0bc-47c8-bd85-d9977a5d9fa8-must-gather-output\") pod \"must-gather-gdfpw\" (UID: \"7c916403-f0bc-47c8-bd85-d9977a5d9fa8\") " pod="openshift-must-gather-z4rlc/must-gather-gdfpw" Jan 30 12:49:31 crc kubenswrapper[4703]: I0130 12:49:31.920616 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-458g8\" (UniqueName: \"kubernetes.io/projected/7c916403-f0bc-47c8-bd85-d9977a5d9fa8-kube-api-access-458g8\") pod \"must-gather-gdfpw\" (UID: \"7c916403-f0bc-47c8-bd85-d9977a5d9fa8\") " pod="openshift-must-gather-z4rlc/must-gather-gdfpw" Jan 30 12:49:31 crc kubenswrapper[4703]: I0130 12:49:31.988324 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-z4rlc/must-gather-gdfpw" Jan 30 12:49:32 crc kubenswrapper[4703]: I0130 12:49:32.087252 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:49:32 crc kubenswrapper[4703]: E0130 12:49:32.088266 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:49:33 crc kubenswrapper[4703]: I0130 12:49:33.353497 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-z4rlc/must-gather-gdfpw"] Jan 30 12:49:34 crc kubenswrapper[4703]: I0130 12:49:34.307707 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-z4rlc/must-gather-gdfpw" event={"ID":"7c916403-f0bc-47c8-bd85-d9977a5d9fa8","Type":"ContainerStarted","Data":"12a2bd4fe00df82f7a1a34a70fd1e4845db753bc4576c22fb3d20dd8059ca6a0"} Jan 30 12:49:36 crc kubenswrapper[4703]: I0130 12:49:36.088554 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:49:36 crc kubenswrapper[4703]: E0130 12:49:36.089784 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:49:39 crc kubenswrapper[4703]: I0130 12:49:39.368186 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-z4rlc/must-gather-gdfpw" event={"ID":"7c916403-f0bc-47c8-bd85-d9977a5d9fa8","Type":"ContainerStarted","Data":"48d417a3c7be1853b19914b7bb163ca2d115fedcee04898512dba6c339fe88b5"} Jan 30 12:49:39 crc kubenswrapper[4703]: I0130 12:49:39.368890 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-z4rlc/must-gather-gdfpw" event={"ID":"7c916403-f0bc-47c8-bd85-d9977a5d9fa8","Type":"ContainerStarted","Data":"ddbbd288d8c1469e5ffa8b08baee0389538f728dff5093cbd5f4c145fb79e106"} Jan 30 12:49:39 crc kubenswrapper[4703]: I0130 12:49:39.398392 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-z4rlc/must-gather-gdfpw" podStartSLOduration=3.644020784 podStartE2EDuration="8.398297255s" podCreationTimestamp="2026-01-30 12:49:31 +0000 UTC" firstStartedPulling="2026-01-30 12:49:33.370777871 +0000 UTC m=+3209.148599525" lastFinishedPulling="2026-01-30 12:49:38.125054342 +0000 UTC m=+3213.902875996" observedRunningTime="2026-01-30 12:49:39.390430197 +0000 UTC m=+3215.168251861" watchObservedRunningTime="2026-01-30 12:49:39.398297255 +0000 UTC m=+3215.176118909" Jan 30 12:49:42 crc kubenswrapper[4703]: I0130 12:49:42.934276 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-z4rlc/crc-debug-pq72v"] Jan 30 12:49:42 crc kubenswrapper[4703]: I0130 12:49:42.936369 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-z4rlc/crc-debug-pq72v" Jan 30 12:49:42 crc kubenswrapper[4703]: I0130 12:49:42.940117 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-z4rlc"/"default-dockercfg-v9jvz" Jan 30 12:49:43 crc kubenswrapper[4703]: I0130 12:49:43.060043 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmqk9\" (UniqueName: \"kubernetes.io/projected/20827d09-80e4-4995-a3d9-e8bea95fd833-kube-api-access-tmqk9\") pod \"crc-debug-pq72v\" (UID: \"20827d09-80e4-4995-a3d9-e8bea95fd833\") " pod="openshift-must-gather-z4rlc/crc-debug-pq72v" Jan 30 12:49:43 crc kubenswrapper[4703]: I0130 12:49:43.060118 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/20827d09-80e4-4995-a3d9-e8bea95fd833-host\") pod \"crc-debug-pq72v\" (UID: \"20827d09-80e4-4995-a3d9-e8bea95fd833\") " pod="openshift-must-gather-z4rlc/crc-debug-pq72v" Jan 30 12:49:43 crc kubenswrapper[4703]: I0130 12:49:43.164744 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmqk9\" (UniqueName: \"kubernetes.io/projected/20827d09-80e4-4995-a3d9-e8bea95fd833-kube-api-access-tmqk9\") pod \"crc-debug-pq72v\" (UID: \"20827d09-80e4-4995-a3d9-e8bea95fd833\") " pod="openshift-must-gather-z4rlc/crc-debug-pq72v" Jan 30 12:49:43 crc kubenswrapper[4703]: I0130 12:49:43.164838 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/20827d09-80e4-4995-a3d9-e8bea95fd833-host\") pod \"crc-debug-pq72v\" (UID: \"20827d09-80e4-4995-a3d9-e8bea95fd833\") " pod="openshift-must-gather-z4rlc/crc-debug-pq72v" Jan 30 12:49:43 crc kubenswrapper[4703]: I0130 12:49:43.167559 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/20827d09-80e4-4995-a3d9-e8bea95fd833-host\") pod \"crc-debug-pq72v\" (UID: \"20827d09-80e4-4995-a3d9-e8bea95fd833\") " pod="openshift-must-gather-z4rlc/crc-debug-pq72v" Jan 30 12:49:43 crc kubenswrapper[4703]: I0130 12:49:43.197395 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmqk9\" (UniqueName: \"kubernetes.io/projected/20827d09-80e4-4995-a3d9-e8bea95fd833-kube-api-access-tmqk9\") pod \"crc-debug-pq72v\" (UID: \"20827d09-80e4-4995-a3d9-e8bea95fd833\") " pod="openshift-must-gather-z4rlc/crc-debug-pq72v" Jan 30 12:49:43 crc kubenswrapper[4703]: I0130 12:49:43.259876 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-z4rlc/crc-debug-pq72v" Jan 30 12:49:43 crc kubenswrapper[4703]: I0130 12:49:43.430472 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-z4rlc/crc-debug-pq72v" event={"ID":"20827d09-80e4-4995-a3d9-e8bea95fd833","Type":"ContainerStarted","Data":"8095a8597620d7e3d2a1419a50d8dc92379a2c3de54090e5a63c057f13134d6b"} Jan 30 12:49:46 crc kubenswrapper[4703]: I0130 12:49:46.087910 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:49:46 crc kubenswrapper[4703]: E0130 12:49:46.089332 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:49:48 crc kubenswrapper[4703]: I0130 12:49:48.087218 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:49:48 crc kubenswrapper[4703]: E0130 12:49:48.087931 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:49:54 crc kubenswrapper[4703]: I0130 12:49:54.405563 4703 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-2wfxz container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.65:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 30 12:49:54 crc kubenswrapper[4703]: I0130 12:49:54.447304 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-2wfxz" podUID="f0dd4153-47cd-40a1-b929-69ecba1b33f4" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.65:8080/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 30 12:49:57 crc kubenswrapper[4703]: I0130 12:49:57.483992 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-z4rlc/crc-debug-pq72v" event={"ID":"20827d09-80e4-4995-a3d9-e8bea95fd833","Type":"ContainerStarted","Data":"95ccdd61e4ba5dafc9e746fae91da03691f84ebe82afd4139d6898b41360939b"} Jan 30 12:49:57 crc kubenswrapper[4703]: I0130 12:49:57.501659 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-z4rlc/crc-debug-pq72v" podStartSLOduration=2.115214898 podStartE2EDuration="15.501609486s" podCreationTimestamp="2026-01-30 12:49:42 +0000 UTC" firstStartedPulling="2026-01-30 12:49:43.308106387 +0000 UTC m=+3219.085928051" lastFinishedPulling="2026-01-30 12:49:56.694500984 +0000 UTC m=+3232.472322639" observedRunningTime="2026-01-30 12:49:57.500288981 +0000 UTC m=+3233.278110635" watchObservedRunningTime="2026-01-30 12:49:57.501609486 +0000 UTC m=+3233.279431140" Jan 30 12:49:59 crc kubenswrapper[4703]: I0130 12:49:59.087111 4703 scope.go:117] "RemoveContainer" 
containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:49:59 crc kubenswrapper[4703]: E0130 12:49:59.090246 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:50:00 crc kubenswrapper[4703]: I0130 12:50:00.087460 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:50:00 crc kubenswrapper[4703]: E0130 12:50:00.088649 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:50:11 crc kubenswrapper[4703]: I0130 12:50:11.088031 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:50:11 crc kubenswrapper[4703]: E0130 12:50:11.089249 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:50:15 crc kubenswrapper[4703]: I0130 12:50:15.096474 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:50:15 crc kubenswrapper[4703]: E0130 12:50:15.097792 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:50:19 crc kubenswrapper[4703]: I0130 12:50:19.735461 4703 generic.go:334] "Generic (PLEG): container finished" podID="20827d09-80e4-4995-a3d9-e8bea95fd833" containerID="95ccdd61e4ba5dafc9e746fae91da03691f84ebe82afd4139d6898b41360939b" exitCode=0 Jan 30 12:50:19 crc kubenswrapper[4703]: I0130 12:50:19.735578 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-z4rlc/crc-debug-pq72v" event={"ID":"20827d09-80e4-4995-a3d9-e8bea95fd833","Type":"ContainerDied","Data":"95ccdd61e4ba5dafc9e746fae91da03691f84ebe82afd4139d6898b41360939b"} Jan 30 12:50:20 crc kubenswrapper[4703]: I0130 12:50:20.903269 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-z4rlc/crc-debug-pq72v" Jan 30 12:50:20 crc kubenswrapper[4703]: I0130 12:50:20.951914 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-z4rlc/crc-debug-pq72v"] Jan 30 12:50:20 crc kubenswrapper[4703]: I0130 12:50:20.964301 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-z4rlc/crc-debug-pq72v"] Jan 30 12:50:21 crc kubenswrapper[4703]: I0130 12:50:21.018451 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/20827d09-80e4-4995-a3d9-e8bea95fd833-host\") pod \"20827d09-80e4-4995-a3d9-e8bea95fd833\" (UID: \"20827d09-80e4-4995-a3d9-e8bea95fd833\") " Jan 30 12:50:21 crc kubenswrapper[4703]: I0130 12:50:21.018671 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmqk9\" (UniqueName: \"kubernetes.io/projected/20827d09-80e4-4995-a3d9-e8bea95fd833-kube-api-access-tmqk9\") pod \"20827d09-80e4-4995-a3d9-e8bea95fd833\" (UID: \"20827d09-80e4-4995-a3d9-e8bea95fd833\") " Jan 30 12:50:21 crc kubenswrapper[4703]: I0130 12:50:21.020370 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/20827d09-80e4-4995-a3d9-e8bea95fd833-host" (OuterVolumeSpecName: "host") pod "20827d09-80e4-4995-a3d9-e8bea95fd833" (UID: "20827d09-80e4-4995-a3d9-e8bea95fd833"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:50:21 crc kubenswrapper[4703]: I0130 12:50:21.030681 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20827d09-80e4-4995-a3d9-e8bea95fd833-kube-api-access-tmqk9" (OuterVolumeSpecName: "kube-api-access-tmqk9") pod "20827d09-80e4-4995-a3d9-e8bea95fd833" (UID: "20827d09-80e4-4995-a3d9-e8bea95fd833"). InnerVolumeSpecName "kube-api-access-tmqk9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:50:21 crc kubenswrapper[4703]: I0130 12:50:21.100879 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20827d09-80e4-4995-a3d9-e8bea95fd833" path="/var/lib/kubelet/pods/20827d09-80e4-4995-a3d9-e8bea95fd833/volumes" Jan 30 12:50:21 crc kubenswrapper[4703]: I0130 12:50:21.121642 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmqk9\" (UniqueName: \"kubernetes.io/projected/20827d09-80e4-4995-a3d9-e8bea95fd833-kube-api-access-tmqk9\") on node \"crc\" DevicePath \"\"" Jan 30 12:50:21 crc kubenswrapper[4703]: I0130 12:50:21.121692 4703 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/20827d09-80e4-4995-a3d9-e8bea95fd833-host\") on node \"crc\" DevicePath \"\"" Jan 30 12:50:21 crc kubenswrapper[4703]: I0130 12:50:21.770151 4703 scope.go:117] "RemoveContainer" containerID="95ccdd61e4ba5dafc9e746fae91da03691f84ebe82afd4139d6898b41360939b" Jan 30 12:50:21 crc kubenswrapper[4703]: I0130 12:50:21.770365 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-z4rlc/crc-debug-pq72v" Jan 30 12:50:22 crc kubenswrapper[4703]: I0130 12:50:22.239786 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-z4rlc/crc-debug-xbj6p"] Jan 30 12:50:22 crc kubenswrapper[4703]: E0130 12:50:22.240878 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20827d09-80e4-4995-a3d9-e8bea95fd833" containerName="container-00" Jan 30 12:50:22 crc kubenswrapper[4703]: I0130 12:50:22.240896 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="20827d09-80e4-4995-a3d9-e8bea95fd833" containerName="container-00" Jan 30 12:50:22 crc kubenswrapper[4703]: I0130 12:50:22.241099 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="20827d09-80e4-4995-a3d9-e8bea95fd833" containerName="container-00" Jan 30 12:50:22 crc kubenswrapper[4703]: I0130 12:50:22.244976 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-z4rlc/crc-debug-xbj6p" Jan 30 12:50:22 crc kubenswrapper[4703]: I0130 12:50:22.249541 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-z4rlc"/"default-dockercfg-v9jvz" Jan 30 12:50:22 crc kubenswrapper[4703]: I0130 12:50:22.349815 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e3dd021a-377c-4f6e-871b-463c8e7b39d3-host\") pod \"crc-debug-xbj6p\" (UID: \"e3dd021a-377c-4f6e-871b-463c8e7b39d3\") " pod="openshift-must-gather-z4rlc/crc-debug-xbj6p" Jan 30 12:50:22 crc kubenswrapper[4703]: I0130 12:50:22.350033 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nb927\" (UniqueName: \"kubernetes.io/projected/e3dd021a-377c-4f6e-871b-463c8e7b39d3-kube-api-access-nb927\") pod \"crc-debug-xbj6p\" (UID: \"e3dd021a-377c-4f6e-871b-463c8e7b39d3\") " pod="openshift-must-gather-z4rlc/crc-debug-xbj6p" Jan 30 12:50:22 crc kubenswrapper[4703]: I0130 12:50:22.453082 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e3dd021a-377c-4f6e-871b-463c8e7b39d3-host\") pod \"crc-debug-xbj6p\" (UID: \"e3dd021a-377c-4f6e-871b-463c8e7b39d3\") " pod="openshift-must-gather-z4rlc/crc-debug-xbj6p" Jan 30 12:50:22 crc kubenswrapper[4703]: I0130 12:50:22.453302 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e3dd021a-377c-4f6e-871b-463c8e7b39d3-host\") pod \"crc-debug-xbj6p\" (UID: \"e3dd021a-377c-4f6e-871b-463c8e7b39d3\") " pod="openshift-must-gather-z4rlc/crc-debug-xbj6p" Jan 30 12:50:22 crc kubenswrapper[4703]: I0130 12:50:22.453598 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nb927\" (UniqueName: \"kubernetes.io/projected/e3dd021a-377c-4f6e-871b-463c8e7b39d3-kube-api-access-nb927\") pod \"crc-debug-xbj6p\" (UID: \"e3dd021a-377c-4f6e-871b-463c8e7b39d3\") " pod="openshift-must-gather-z4rlc/crc-debug-xbj6p" Jan 30 12:50:22 crc kubenswrapper[4703]: I0130 12:50:22.481180 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nb927\" (UniqueName: \"kubernetes.io/projected/e3dd021a-377c-4f6e-871b-463c8e7b39d3-kube-api-access-nb927\") pod \"crc-debug-xbj6p\" (UID: \"e3dd021a-377c-4f6e-871b-463c8e7b39d3\") " pod="openshift-must-gather-z4rlc/crc-debug-xbj6p" Jan 30 12:50:22 crc kubenswrapper[4703]: I0130 
12:50:22.569921 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-z4rlc/crc-debug-xbj6p" Jan 30 12:50:22 crc kubenswrapper[4703]: I0130 12:50:22.790499 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-z4rlc/crc-debug-xbj6p" event={"ID":"e3dd021a-377c-4f6e-871b-463c8e7b39d3","Type":"ContainerStarted","Data":"f3b4f9e34889bfa3f6eb3760304daa649c33db18c38f890e10cfba2aa06b6058"} Jan 30 12:50:23 crc kubenswrapper[4703]: I0130 12:50:23.092950 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:50:23 crc kubenswrapper[4703]: E0130 12:50:23.093299 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:50:23 crc kubenswrapper[4703]: I0130 12:50:23.805707 4703 generic.go:334] "Generic (PLEG): container finished" podID="e3dd021a-377c-4f6e-871b-463c8e7b39d3" containerID="9f7ceb50c2cc88da664a3c33983f3c8726eb368ad783a2c82a3477b6f94382dd" exitCode=1 Jan 30 12:50:23 crc kubenswrapper[4703]: I0130 12:50:23.805821 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-z4rlc/crc-debug-xbj6p" event={"ID":"e3dd021a-377c-4f6e-871b-463c8e7b39d3","Type":"ContainerDied","Data":"9f7ceb50c2cc88da664a3c33983f3c8726eb368ad783a2c82a3477b6f94382dd"} Jan 30 12:50:23 crc kubenswrapper[4703]: I0130 12:50:23.858731 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-z4rlc/crc-debug-xbj6p"] Jan 30 12:50:23 crc kubenswrapper[4703]: I0130 12:50:23.871111 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-z4rlc/crc-debug-xbj6p"] Jan 30 12:50:24 crc kubenswrapper[4703]: I0130 12:50:24.941162 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-z4rlc/crc-debug-xbj6p" Jan 30 12:50:25 crc kubenswrapper[4703]: I0130 12:50:25.046178 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e3dd021a-377c-4f6e-871b-463c8e7b39d3-host\") pod \"e3dd021a-377c-4f6e-871b-463c8e7b39d3\" (UID: \"e3dd021a-377c-4f6e-871b-463c8e7b39d3\") " Jan 30 12:50:25 crc kubenswrapper[4703]: I0130 12:50:25.046298 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e3dd021a-377c-4f6e-871b-463c8e7b39d3-host" (OuterVolumeSpecName: "host") pod "e3dd021a-377c-4f6e-871b-463c8e7b39d3" (UID: "e3dd021a-377c-4f6e-871b-463c8e7b39d3"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 30 12:50:25 crc kubenswrapper[4703]: I0130 12:50:25.046473 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nb927\" (UniqueName: \"kubernetes.io/projected/e3dd021a-377c-4f6e-871b-463c8e7b39d3-kube-api-access-nb927\") pod \"e3dd021a-377c-4f6e-871b-463c8e7b39d3\" (UID: \"e3dd021a-377c-4f6e-871b-463c8e7b39d3\") " Jan 30 12:50:25 crc kubenswrapper[4703]: I0130 12:50:25.047380 4703 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e3dd021a-377c-4f6e-871b-463c8e7b39d3-host\") on node \"crc\" DevicePath \"\"" Jan 30 12:50:25 crc kubenswrapper[4703]: I0130 12:50:25.058461 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3dd021a-377c-4f6e-871b-463c8e7b39d3-kube-api-access-nb927" (OuterVolumeSpecName: "kube-api-access-nb927") pod "e3dd021a-377c-4f6e-871b-463c8e7b39d3" (UID: "e3dd021a-377c-4f6e-871b-463c8e7b39d3"). InnerVolumeSpecName "kube-api-access-nb927". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:50:25 crc kubenswrapper[4703]: I0130 12:50:25.100315 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3dd021a-377c-4f6e-871b-463c8e7b39d3" path="/var/lib/kubelet/pods/e3dd021a-377c-4f6e-871b-463c8e7b39d3/volumes" Jan 30 12:50:25 crc kubenswrapper[4703]: I0130 12:50:25.153715 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nb927\" (UniqueName: \"kubernetes.io/projected/e3dd021a-377c-4f6e-871b-463c8e7b39d3-kube-api-access-nb927\") on node \"crc\" DevicePath \"\"" Jan 30 12:50:25 crc kubenswrapper[4703]: I0130 12:50:25.920938 4703 scope.go:117] "RemoveContainer" containerID="9f7ceb50c2cc88da664a3c33983f3c8726eb368ad783a2c82a3477b6f94382dd" Jan 30 12:50:25 crc kubenswrapper[4703]: I0130 12:50:25.921094 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-z4rlc/crc-debug-xbj6p" Jan 30 12:50:30 crc kubenswrapper[4703]: I0130 12:50:30.087932 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:50:30 crc kubenswrapper[4703]: E0130 12:50:30.088827 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:50:37 crc kubenswrapper[4703]: I0130 12:50:37.087484 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:50:37 crc kubenswrapper[4703]: E0130 12:50:37.088532 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:50:45 crc kubenswrapper[4703]: I0130 12:50:45.087577 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:50:45 crc kubenswrapper[4703]: E0130 12:50:45.091520 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:50:48 crc kubenswrapper[4703]: I0130 12:50:48.087108 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:50:48 crc kubenswrapper[4703]: E0130 12:50:48.088357 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:50:58 crc kubenswrapper[4703]: I0130 12:50:58.087490 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:50:58 crc kubenswrapper[4703]: E0130 12:50:58.089099 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:51:02 crc kubenswrapper[4703]: I0130 12:51:02.087192 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002" Jan 30 12:51:02 crc kubenswrapper[4703]: E0130 12:51:02.088179 4703 
Jan 30 12:51:12 crc kubenswrapper[4703]: I0130 12:51:12.087424 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913"
Jan 30 12:51:12 crc kubenswrapper[4703]: E0130 12:51:12.088641 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:51:15 crc kubenswrapper[4703]: I0130 12:51:15.094366 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002"
Jan 30 12:51:15 crc kubenswrapper[4703]: I0130 12:51:15.712312 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerStarted","Data":"11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2"}
Jan 30 12:51:16 crc kubenswrapper[4703]: I0130 12:51:16.034831 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Jan 30 12:51:16 crc kubenswrapper[4703]: I0130 12:51:16.034906 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Jan 30 12:51:16 crc kubenswrapper[4703]: I0130 12:51:16.067210 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Jan 30 12:51:16 crc kubenswrapper[4703]: I0130 12:51:16.760435 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Jan 30 12:51:19 crc kubenswrapper[4703]: I0130 12:51:19.764493 4703 generic.go:334] "Generic (PLEG): container finished" podID="2fc19a6b-3cde-4bb5-9499-f5be846289da" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" exitCode=1
Jan 30 12:51:19 crc kubenswrapper[4703]: I0130 12:51:19.764579 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerDied","Data":"11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2"}
Jan 30 12:51:19 crc kubenswrapper[4703]: I0130 12:51:19.766251 4703 scope.go:117] "RemoveContainer" containerID="56fdd66d222e9b5d87e5f1d5b0237c1a3206abf07c46b3ec3261836bd046f002"
Jan 30 12:51:19 crc kubenswrapper[4703]: I0130 12:51:19.767294 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2"
Jan 30 12:51:19 crc kubenswrapper[4703]: E0130 12:51:19.767684 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:51:21 crc kubenswrapper[4703]: I0130 12:51:21.035315 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Jan 30 12:51:21 crc kubenswrapper[4703]: I0130 12:51:21.036884 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2"
Jan 30 12:51:21 crc kubenswrapper[4703]: E0130 12:51:21.037420 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:51:25 crc kubenswrapper[4703]: I0130 12:51:25.096574 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913"
Jan 30 12:51:25 crc kubenswrapper[4703]: E0130 12:51:25.099654 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235"
Jan 30 12:51:26 crc kubenswrapper[4703]: I0130 12:51:26.034606 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/nova-scheduler-0"
Jan 30 12:51:26 crc kubenswrapper[4703]: I0130 12:51:26.034683 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Jan 30 12:51:26 crc kubenswrapper[4703]: I0130 12:51:26.035968 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2"
Jan 30 12:51:26 crc kubenswrapper[4703]: E0130 12:51:26.036465 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da"
Jan 30 12:51:29 crc kubenswrapper[4703]: I0130 12:51:29.869044 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-78d958758-q982d_0f2e50f0-ca1d-4b8f-b284-cbf7069e1279/barbican-api/0.log"
Jan 30 12:51:30 crc kubenswrapper[4703]: I0130 12:51:30.007100 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-78d958758-q982d_0f2e50f0-ca1d-4b8f-b284-cbf7069e1279/barbican-api-log/0.log"
Jan 30 12:51:30 crc kubenswrapper[4703]: I0130 12:51:30.102991 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-97468ff8-5bxdm_58305133-2318-4520-a3cc-bca7a1d61895/barbican-keystone-listener/0.log"
Jan 30 12:51:30 crc kubenswrapper[4703]: I0130 12:51:30.131572 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-97468ff8-5bxdm_58305133-2318-4520-a3cc-bca7a1d61895/barbican-keystone-listener-log/0.log"
Jan 30 12:51:30 crc kubenswrapper[4703]: I0130 12:51:30.641074 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-57d7c7f95-fngdp_f8d5f833-0ceb-4dc6-bb87-ce670386ef8b/barbican-worker/0.log"
path="/var/log/pods/openstack_barbican-worker-57d7c7f95-fngdp_f8d5f833-0ceb-4dc6-bb87-ce670386ef8b/barbican-worker/0.log" Jan 30 12:51:30 crc kubenswrapper[4703]: I0130 12:51:30.666350 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-57d7c7f95-fngdp_f8d5f833-0ceb-4dc6-bb87-ce670386ef8b/barbican-worker-log/0.log" Jan 30 12:51:30 crc kubenswrapper[4703]: I0130 12:51:30.864266 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-gt94t_d7bb23ae-c393-4b73-a856-b61d160d513d/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Jan 30 12:51:30 crc kubenswrapper[4703]: I0130 12:51:30.949692 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_3327faec-a7ba-4cd1-987e-b9642a6d6eed/ceilometer-central-agent/0.log" Jan 30 12:51:31 crc kubenswrapper[4703]: I0130 12:51:31.070561 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_3327faec-a7ba-4cd1-987e-b9642a6d6eed/ceilometer-notification-agent/0.log" Jan 30 12:51:31 crc kubenswrapper[4703]: I0130 12:51:31.121423 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_3327faec-a7ba-4cd1-987e-b9642a6d6eed/proxy-httpd/0.log" Jan 30 12:51:31 crc kubenswrapper[4703]: I0130 12:51:31.187746 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_3327faec-a7ba-4cd1-987e-b9642a6d6eed/sg-core/0.log" Jan 30 12:51:31 crc kubenswrapper[4703]: I0130 12:51:31.352932 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_cbd06273-19f7-4051-aad3-5a9ea641cfc4/cinder-api/0.log" Jan 30 12:51:31 crc kubenswrapper[4703]: I0130 12:51:31.386263 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_cbd06273-19f7-4051-aad3-5a9ea641cfc4/cinder-api-log/0.log" Jan 30 12:51:31 crc kubenswrapper[4703]: I0130 12:51:31.561099 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_3b8e1d2e-960f-47aa-b6d0-f327ecdd5880/cinder-scheduler/0.log" Jan 30 12:51:31 crc kubenswrapper[4703]: I0130 12:51:31.688009 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_3b8e1d2e-960f-47aa-b6d0-f327ecdd5880/probe/0.log" Jan 30 12:51:31 crc kubenswrapper[4703]: I0130 12:51:31.867808 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-lhkn2_2a36d42d-77b8-4ba1-8253-909a9bfb0c4a/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 30 12:51:31 crc kubenswrapper[4703]: I0130 12:51:31.994734 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-42gg7_dbcb8ade-a107-4435-8ee5-e27e4bb95998/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 30 12:51:32 crc kubenswrapper[4703]: I0130 12:51:32.161753 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-p5rn7_50c4f4f0-c935-406c-86a7-791a8dd0e812/init/0.log" Jan 30 12:51:32 crc kubenswrapper[4703]: I0130 12:51:32.642569 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-p5rn7_50c4f4f0-c935-406c-86a7-791a8dd0e812/init/0.log" Jan 30 12:51:32 crc kubenswrapper[4703]: I0130 12:51:32.683454 4703 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-vmszn_e0374bdb-2201-41a5-90ff-2185eac3add1/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Jan 30 12:51:32 crc kubenswrapper[4703]: I0130 12:51:32.780233 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6cd9bffc9-p5rn7_50c4f4f0-c935-406c-86a7-791a8dd0e812/dnsmasq-dns/0.log" Jan 30 12:51:32 crc kubenswrapper[4703]: I0130 12:51:32.876648 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9/glance-httpd/0.log" Jan 30 12:51:32 crc kubenswrapper[4703]: I0130 12:51:32.978040 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_0b7ec9d2-bc0b-4ccc-9572-651fc9e14ca9/glance-log/0.log" Jan 30 12:51:33 crc kubenswrapper[4703]: I0130 12:51:33.308205 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_a12e4088-00b9-416c-92a8-a40b997f06ea/glance-httpd/0.log" Jan 30 12:51:33 crc kubenswrapper[4703]: I0130 12:51:33.422992 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_a12e4088-00b9-416c-92a8-a40b997f06ea/glance-log/0.log" Jan 30 12:51:33 crc kubenswrapper[4703]: I0130 12:51:33.684652 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-77fb4cf9b8-pw692_9c6d3262-7469-45ac-b5c8-9eb0f9456a5a/horizon/1.log" Jan 30 12:51:33 crc kubenswrapper[4703]: I0130 12:51:33.710906 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-77fb4cf9b8-pw692_9c6d3262-7469-45ac-b5c8-9eb0f9456a5a/horizon/2.log" Jan 30 12:51:34 crc kubenswrapper[4703]: I0130 12:51:34.047829 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-fxxfq_9d2e464d-58a6-45d6-bc48-4bf88f246501/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Jan 30 12:51:34 crc kubenswrapper[4703]: I0130 12:51:34.091377 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-zbg8b_3dc7e43f-3f6e-4d19-970d-bd84acc019bd/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 30 12:51:34 crc kubenswrapper[4703]: I0130 12:51:34.092892 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-77fb4cf9b8-pw692_9c6d3262-7469-45ac-b5c8-9eb0f9456a5a/horizon-log/0.log" Jan 30 12:51:34 crc kubenswrapper[4703]: I0130 12:51:34.350157 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_18ac36a5-1494-44b5-9a0a-cd275513992a/kube-state-metrics/0.log" Jan 30 12:51:34 crc kubenswrapper[4703]: I0130 12:51:34.456226 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-7946f9d8b5-42hg8_67c4cf21-bf63-4d34-a72a-4e881bcc2c7d/keystone-api/0.log" Jan 30 12:51:34 crc kubenswrapper[4703]: I0130 12:51:34.642156 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-tctmb_201fa630-41e7-4070-9460-1f1b10397de8/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Jan 30 12:51:34 crc kubenswrapper[4703]: I0130 12:51:34.897852 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7ccfb848f-k68gw_68a6efb3-cffb-4397-922b-c8486b399f76/neutron-httpd/0.log" Jan 30 12:51:34 crc kubenswrapper[4703]: I0130 12:51:34.971921 4703 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_neutron-7ccfb848f-k68gw_68a6efb3-cffb-4397-922b-c8486b399f76/neutron-api/0.log" Jan 30 12:51:35 crc kubenswrapper[4703]: I0130 12:51:35.241866 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-bxfld_bb1b3231-4e16-4378-8c21-0c59eaa8dd29/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Jan 30 12:51:35 crc kubenswrapper[4703]: I0130 12:51:35.462062 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_5d551d53-088a-4087-9a64-f26c936a32fe/nova-api-log/0.log" Jan 30 12:51:35 crc kubenswrapper[4703]: I0130 12:51:35.587629 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_5d551d53-088a-4087-9a64-f26c936a32fe/nova-api-api/0.log" Jan 30 12:51:35 crc kubenswrapper[4703]: I0130 12:51:35.725318 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_e48a9fe8-36bf-41e5-920e-116bb6237828/nova-cell0-conductor-conductor/0.log" Jan 30 12:51:36 crc kubenswrapper[4703]: I0130 12:51:36.047805 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_db36ff33-3188-4e4b-ab14-8201ea11f938/nova-cell1-conductor-conductor/0.log" Jan 30 12:51:36 crc kubenswrapper[4703]: I0130 12:51:36.141937 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_9b602062-70c1-4485-87b8-a4aedb97350d/nova-cell1-novncproxy-novncproxy/0.log" Jan 30 12:51:36 crc kubenswrapper[4703]: I0130 12:51:36.339439 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-d9klv_79c7c216-3c2c-4744-b6c5-d97feb476cdd/nova-edpm-deployment-openstack-edpm-ipam/0.log" Jan 30 12:51:36 crc kubenswrapper[4703]: I0130 12:51:36.494816 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_d4411a18-e909-4a1c-8e14-7c2014546ab6/nova-metadata-log/0.log" Jan 30 12:51:36 crc kubenswrapper[4703]: I0130 12:51:36.780181 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_2fc19a6b-3cde-4bb5-9499-f5be846289da/nova-scheduler-scheduler/10.log" Jan 30 12:51:36 crc kubenswrapper[4703]: I0130 12:51:36.780528 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_2fc19a6b-3cde-4bb5-9499-f5be846289da/nova-scheduler-scheduler/10.log" Jan 30 12:51:37 crc kubenswrapper[4703]: I0130 12:51:37.006008 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_e5c696fa-999f-48c2-bf1a-e015ec7e7ab1/mysql-bootstrap/0.log" Jan 30 12:51:37 crc kubenswrapper[4703]: I0130 12:51:37.350533 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_e5c696fa-999f-48c2-bf1a-e015ec7e7ab1/mysql-bootstrap/0.log" Jan 30 12:51:37 crc kubenswrapper[4703]: I0130 12:51:37.427216 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_e5c696fa-999f-48c2-bf1a-e015ec7e7ab1/galera/0.log" Jan 30 12:51:37 crc kubenswrapper[4703]: I0130 12:51:37.445200 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_d4411a18-e909-4a1c-8e14-7c2014546ab6/nova-metadata-metadata/0.log" Jan 30 12:51:37 crc kubenswrapper[4703]: I0130 12:51:37.597153 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_f0ae0323-f870-408b-b688-df1b4e3e8da6/mysql-bootstrap/0.log" Jan 30 
12:51:37 crc kubenswrapper[4703]: I0130 12:51:37.819112 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_f0ae0323-f870-408b-b688-df1b4e3e8da6/mysql-bootstrap/0.log" Jan 30 12:51:37 crc kubenswrapper[4703]: I0130 12:51:37.855760 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_f0ae0323-f870-408b-b688-df1b4e3e8da6/galera/0.log" Jan 30 12:51:37 crc kubenswrapper[4703]: I0130 12:51:37.915261 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_64e4eca7-b75f-4ac6-ba29-f017d5aec24e/openstackclient/0.log" Jan 30 12:51:38 crc kubenswrapper[4703]: I0130 12:51:38.206308 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-jnlms_64bd0ec4-dbb4-4b67-b6ae-312c7667f598/openstack-network-exporter/0.log" Jan 30 12:51:38 crc kubenswrapper[4703]: I0130 12:51:38.719600 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-tg9tq_20007227-8914-4ec9-ad56-1bf477408476/ovsdb-server-init/0.log" Jan 30 12:51:38 crc kubenswrapper[4703]: I0130 12:51:38.953528 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-tg9tq_20007227-8914-4ec9-ad56-1bf477408476/ovsdb-server-init/0.log" Jan 30 12:51:38 crc kubenswrapper[4703]: I0130 12:51:38.956613 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-tg9tq_20007227-8914-4ec9-ad56-1bf477408476/ovs-vswitchd/0.log" Jan 30 12:51:39 crc kubenswrapper[4703]: I0130 12:51:39.082377 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-tg9tq_20007227-8914-4ec9-ad56-1bf477408476/ovsdb-server/0.log" Jan 30 12:51:39 crc kubenswrapper[4703]: I0130 12:51:39.088634 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:51:39 crc kubenswrapper[4703]: E0130 12:51:39.089001 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:51:39 crc kubenswrapper[4703]: I0130 12:51:39.228458 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-rjbtf_fd146d96-e737-48a6-a3e4-d414913da90f/ovn-controller/0.log" Jan 30 12:51:39 crc kubenswrapper[4703]: I0130 12:51:39.625516 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-dvwp2_28b7f396-866b-4dc5-9ed5-d45a94da5890/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Jan 30 12:51:39 crc kubenswrapper[4703]: I0130 12:51:39.794708 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea/ovn-northd/0.log" Jan 30 12:51:39 crc kubenswrapper[4703]: I0130 12:51:39.798934 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_6f47a6bb-a9ea-4ad2-8508-a0a9b8421cea/openstack-network-exporter/0.log" Jan 30 12:51:39 crc kubenswrapper[4703]: I0130 12:51:39.986544 4703 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-nb-0_5b848cfc-5296-423c-9a02-45bf4c2c850b/openstack-network-exporter/0.log" Jan 30 12:51:40 crc kubenswrapper[4703]: I0130 12:51:40.063095 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_5b848cfc-5296-423c-9a02-45bf4c2c850b/ovsdbserver-nb/0.log" Jan 30 12:51:40 crc kubenswrapper[4703]: I0130 12:51:40.086408 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:51:40 crc kubenswrapper[4703]: E0130 12:51:40.086716 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:51:40 crc kubenswrapper[4703]: I0130 12:51:40.260668 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c/openstack-network-exporter/0.log" Jan 30 12:51:40 crc kubenswrapper[4703]: I0130 12:51:40.289955 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_8c0494a8-e5f2-42bf-bcd4-f81c0f5f356c/ovsdbserver-sb/0.log" Jan 30 12:51:40 crc kubenswrapper[4703]: I0130 12:51:40.415325 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5bb45bd7f4-hsvwp_ab801f17-5c1d-4e5d-9e0c-24778ca21833/placement-api/0.log" Jan 30 12:51:40 crc kubenswrapper[4703]: I0130 12:51:40.542835 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-5bb45bd7f4-hsvwp_ab801f17-5c1d-4e5d-9e0c-24778ca21833/placement-log/0.log" Jan 30 12:51:40 crc kubenswrapper[4703]: I0130 12:51:40.752089 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_b250e200-23dc-4508-924c-a4a666aad8e5/init-config-reloader/0.log" Jan 30 12:51:40 crc kubenswrapper[4703]: I0130 12:51:40.924610 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_b250e200-23dc-4508-924c-a4a666aad8e5/init-config-reloader/0.log" Jan 30 12:51:40 crc kubenswrapper[4703]: I0130 12:51:40.960630 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_b250e200-23dc-4508-924c-a4a666aad8e5/prometheus/0.log" Jan 30 12:51:41 crc kubenswrapper[4703]: I0130 12:51:41.005709 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_b250e200-23dc-4508-924c-a4a666aad8e5/config-reloader/0.log" Jan 30 12:51:41 crc kubenswrapper[4703]: I0130 12:51:41.061872 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_b250e200-23dc-4508-924c-a4a666aad8e5/thanos-sidecar/0.log" Jan 30 12:51:41 crc kubenswrapper[4703]: I0130 12:51:41.249958 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_532d7bbf-dd48-4d8f-ae5e-27a35bb889e1/setup-container/0.log" Jan 30 12:51:41 crc kubenswrapper[4703]: I0130 12:51:41.537579 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_2ec18b52-27e8-4a28-819d-dcb39325cbf7/setup-container/0.log" Jan 30 12:51:41 crc kubenswrapper[4703]: I0130 12:51:41.558934 4703 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_rabbitmq-cell1-server-0_532d7bbf-dd48-4d8f-ae5e-27a35bb889e1/setup-container/0.log" Jan 30 12:51:41 crc kubenswrapper[4703]: I0130 12:51:41.641234 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_532d7bbf-dd48-4d8f-ae5e-27a35bb889e1/rabbitmq/0.log" Jan 30 12:51:41 crc kubenswrapper[4703]: I0130 12:51:41.797635 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_2ec18b52-27e8-4a28-819d-dcb39325cbf7/setup-container/0.log" Jan 30 12:51:41 crc kubenswrapper[4703]: I0130 12:51:41.841908 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_2ec18b52-27e8-4a28-819d-dcb39325cbf7/rabbitmq/0.log" Jan 30 12:51:42 crc kubenswrapper[4703]: I0130 12:51:42.091371 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-lhvb9_2b4c4fdd-2e01-44c0-b655-364b653d45ae/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 30 12:51:42 crc kubenswrapper[4703]: I0130 12:51:42.207679 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-wlvc8_2fb337f3-59f5-45cf-9e47-dde8e6dac066/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Jan 30 12:51:42 crc kubenswrapper[4703]: I0130 12:51:42.372544 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-q6zlp_e0551c8c-34a1-4ca3-ab12-6aff9684e1bd/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Jan 30 12:51:42 crc kubenswrapper[4703]: I0130 12:51:42.619265 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-x9xc6_cc1e24eb-d37d-4ce9-afaa-2af8105a976e/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 30 12:51:42 crc kubenswrapper[4703]: I0130 12:51:42.706545 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-7x7vm_ead7b76f-fd6a-41eb-9787-a9a1cb3b5820/ssh-known-hosts-edpm-deployment/0.log" Jan 30 12:51:42 crc kubenswrapper[4703]: I0130 12:51:42.968630 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-79994f947f-qcpzj_f2a53a72-a42a-42aa-b018-50ca9a7fa9aa/proxy-server/0.log" Jan 30 12:51:43 crc kubenswrapper[4703]: I0130 12:51:43.029272 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-79994f947f-qcpzj_f2a53a72-a42a-42aa-b018-50ca9a7fa9aa/proxy-httpd/0.log" Jan 30 12:51:43 crc kubenswrapper[4703]: I0130 12:51:43.149762 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-v4pc7_2688299b-7d5f-4fad-9fd9-78de6b83b333/swift-ring-rebalance/0.log" Jan 30 12:51:43 crc kubenswrapper[4703]: I0130 12:51:43.332005 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6aed7811-b088-403c-bbef-7844c17d52ff/account-auditor/0.log" Jan 30 12:51:43 crc kubenswrapper[4703]: I0130 12:51:43.371786 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6aed7811-b088-403c-bbef-7844c17d52ff/account-reaper/0.log" Jan 30 12:51:43 crc kubenswrapper[4703]: I0130 12:51:43.411668 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6aed7811-b088-403c-bbef-7844c17d52ff/account-replicator/0.log" Jan 30 12:51:43 crc kubenswrapper[4703]: I0130 12:51:43.533322 4703 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_6aed7811-b088-403c-bbef-7844c17d52ff/account-server/0.log" Jan 30 12:51:43 crc kubenswrapper[4703]: I0130 12:51:43.570762 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6aed7811-b088-403c-bbef-7844c17d52ff/container-auditor/0.log" Jan 30 12:51:43 crc kubenswrapper[4703]: I0130 12:51:43.684690 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6aed7811-b088-403c-bbef-7844c17d52ff/container-server/0.log" Jan 30 12:51:43 crc kubenswrapper[4703]: I0130 12:51:43.717080 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6aed7811-b088-403c-bbef-7844c17d52ff/container-replicator/0.log" Jan 30 12:51:43 crc kubenswrapper[4703]: I0130 12:51:43.793650 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6aed7811-b088-403c-bbef-7844c17d52ff/container-updater/0.log" Jan 30 12:51:43 crc kubenswrapper[4703]: I0130 12:51:43.925399 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6aed7811-b088-403c-bbef-7844c17d52ff/object-auditor/0.log" Jan 30 12:51:43 crc kubenswrapper[4703]: I0130 12:51:43.950387 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6aed7811-b088-403c-bbef-7844c17d52ff/object-expirer/0.log" Jan 30 12:51:44 crc kubenswrapper[4703]: I0130 12:51:44.056310 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6aed7811-b088-403c-bbef-7844c17d52ff/object-replicator/0.log" Jan 30 12:51:44 crc kubenswrapper[4703]: I0130 12:51:44.100559 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6aed7811-b088-403c-bbef-7844c17d52ff/object-server/0.log" Jan 30 12:51:44 crc kubenswrapper[4703]: I0130 12:51:44.169863 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6aed7811-b088-403c-bbef-7844c17d52ff/object-updater/0.log" Jan 30 12:51:44 crc kubenswrapper[4703]: I0130 12:51:44.227766 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6aed7811-b088-403c-bbef-7844c17d52ff/rsync/0.log" Jan 30 12:51:44 crc kubenswrapper[4703]: I0130 12:51:44.361342 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_6aed7811-b088-403c-bbef-7844c17d52ff/swift-recon-cron/0.log" Jan 30 12:51:44 crc kubenswrapper[4703]: I0130 12:51:44.538918 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-8d7ql_0563aaa6-b1b3-411e-8ff0-4ef8986fa932/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Jan 30 12:51:44 crc kubenswrapper[4703]: I0130 12:51:44.710831 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_01f70e75-8739-4f5d-bd1b-c064f21b1565/tempest-tests-tempest-tests-runner/0.log" Jan 30 12:51:44 crc kubenswrapper[4703]: I0130 12:51:44.814499 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_546bd0aa-e89f-4f5f-bd6a-36a8a79f5f7e/test-operator-logs-container/0.log" Jan 30 12:51:45 crc kubenswrapper[4703]: I0130 12:51:45.009897 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-glhhp_07c6833e-dfc2-43df-812b-639533947bcb/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 30 12:51:45 crc 
kubenswrapper[4703]: I0130 12:51:45.242256 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_58251080-4018-4188-9136-fa8e49f90aa3/watcher-api-log/0.log" Jan 30 12:51:45 crc kubenswrapper[4703]: I0130 12:51:45.458222 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-applier-0_78484d4c-53f4-4790-965f-2b8c22d7a9ce/watcher-applier/0.log" Jan 30 12:51:46 crc kubenswrapper[4703]: I0130 12:51:46.030343 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-decision-engine-0_a03fc62c-0c38-4b37-b568-661a611eaadd/watcher-decision-engine/0.log" Jan 30 12:51:47 crc kubenswrapper[4703]: I0130 12:51:47.110923 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_watcher-api-0_58251080-4018-4188-9136-fa8e49f90aa3/watcher-api/0.log" Jan 30 12:51:51 crc kubenswrapper[4703]: I0130 12:51:51.089106 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:51:51 crc kubenswrapper[4703]: E0130 12:51:51.090579 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:51:52 crc kubenswrapper[4703]: I0130 12:51:52.087626 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:51:52 crc kubenswrapper[4703]: E0130 12:51:52.087942 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:51:54 crc kubenswrapper[4703]: I0130 12:51:54.307601 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_a5650e0f-dfd5-4fc5-a728-b1eab91a0d23/memcached/0.log" Jan 30 12:52:00 crc kubenswrapper[4703]: I0130 12:52:00.770322 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4xw89"] Jan 30 12:52:00 crc kubenswrapper[4703]: E0130 12:52:00.775619 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3dd021a-377c-4f6e-871b-463c8e7b39d3" containerName="container-00" Jan 30 12:52:00 crc kubenswrapper[4703]: I0130 12:52:00.775653 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3dd021a-377c-4f6e-871b-463c8e7b39d3" containerName="container-00" Jan 30 12:52:00 crc kubenswrapper[4703]: I0130 12:52:00.775941 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3dd021a-377c-4f6e-871b-463c8e7b39d3" containerName="container-00" Jan 30 12:52:00 crc kubenswrapper[4703]: I0130 12:52:00.777931 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4xw89" Jan 30 12:52:00 crc kubenswrapper[4703]: I0130 12:52:00.794084 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4xw89"] Jan 30 12:52:00 crc kubenswrapper[4703]: I0130 12:52:00.946517 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f387c846-3b7c-4333-a010-4e714b32392e-utilities\") pod \"redhat-operators-4xw89\" (UID: \"f387c846-3b7c-4333-a010-4e714b32392e\") " pod="openshift-marketplace/redhat-operators-4xw89" Jan 30 12:52:00 crc kubenswrapper[4703]: I0130 12:52:00.946992 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f387c846-3b7c-4333-a010-4e714b32392e-catalog-content\") pod \"redhat-operators-4xw89\" (UID: \"f387c846-3b7c-4333-a010-4e714b32392e\") " pod="openshift-marketplace/redhat-operators-4xw89" Jan 30 12:52:00 crc kubenswrapper[4703]: I0130 12:52:00.947224 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6752m\" (UniqueName: \"kubernetes.io/projected/f387c846-3b7c-4333-a010-4e714b32392e-kube-api-access-6752m\") pod \"redhat-operators-4xw89\" (UID: \"f387c846-3b7c-4333-a010-4e714b32392e\") " pod="openshift-marketplace/redhat-operators-4xw89" Jan 30 12:52:01 crc kubenswrapper[4703]: I0130 12:52:01.049648 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f387c846-3b7c-4333-a010-4e714b32392e-catalog-content\") pod \"redhat-operators-4xw89\" (UID: \"f387c846-3b7c-4333-a010-4e714b32392e\") " pod="openshift-marketplace/redhat-operators-4xw89" Jan 30 12:52:01 crc kubenswrapper[4703]: I0130 12:52:01.049757 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6752m\" (UniqueName: \"kubernetes.io/projected/f387c846-3b7c-4333-a010-4e714b32392e-kube-api-access-6752m\") pod \"redhat-operators-4xw89\" (UID: \"f387c846-3b7c-4333-a010-4e714b32392e\") " pod="openshift-marketplace/redhat-operators-4xw89" Jan 30 12:52:01 crc kubenswrapper[4703]: I0130 12:52:01.049898 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f387c846-3b7c-4333-a010-4e714b32392e-utilities\") pod \"redhat-operators-4xw89\" (UID: \"f387c846-3b7c-4333-a010-4e714b32392e\") " pod="openshift-marketplace/redhat-operators-4xw89" Jan 30 12:52:01 crc kubenswrapper[4703]: I0130 12:52:01.050375 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f387c846-3b7c-4333-a010-4e714b32392e-catalog-content\") pod \"redhat-operators-4xw89\" (UID: \"f387c846-3b7c-4333-a010-4e714b32392e\") " pod="openshift-marketplace/redhat-operators-4xw89" Jan 30 12:52:01 crc kubenswrapper[4703]: I0130 12:52:01.050699 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f387c846-3b7c-4333-a010-4e714b32392e-utilities\") pod \"redhat-operators-4xw89\" (UID: \"f387c846-3b7c-4333-a010-4e714b32392e\") " pod="openshift-marketplace/redhat-operators-4xw89" Jan 30 12:52:01 crc kubenswrapper[4703]: I0130 12:52:01.084443 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-6752m\" (UniqueName: \"kubernetes.io/projected/f387c846-3b7c-4333-a010-4e714b32392e-kube-api-access-6752m\") pod \"redhat-operators-4xw89\" (UID: \"f387c846-3b7c-4333-a010-4e714b32392e\") " pod="openshift-marketplace/redhat-operators-4xw89" Jan 30 12:52:01 crc kubenswrapper[4703]: I0130 12:52:01.104861 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4xw89" Jan 30 12:52:01 crc kubenswrapper[4703]: I0130 12:52:01.755367 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4xw89"] Jan 30 12:52:02 crc kubenswrapper[4703]: I0130 12:52:02.346563 4703 generic.go:334] "Generic (PLEG): container finished" podID="f387c846-3b7c-4333-a010-4e714b32392e" containerID="a1ec120192d29d2a07f60a285592fbb37138c971a4a65d9db066ce22a6e9f1e4" exitCode=0 Jan 30 12:52:02 crc kubenswrapper[4703]: I0130 12:52:02.346661 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4xw89" event={"ID":"f387c846-3b7c-4333-a010-4e714b32392e","Type":"ContainerDied","Data":"a1ec120192d29d2a07f60a285592fbb37138c971a4a65d9db066ce22a6e9f1e4"} Jan 30 12:52:02 crc kubenswrapper[4703]: I0130 12:52:02.347090 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4xw89" event={"ID":"f387c846-3b7c-4333-a010-4e714b32392e","Type":"ContainerStarted","Data":"55f7d24e773432cc9c3d80f412fd5abaaa549ec162987bafea04905bee8ebb05"} Jan 30 12:52:03 crc kubenswrapper[4703]: I0130 12:52:03.087418 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:52:03 crc kubenswrapper[4703]: I0130 12:52:03.087583 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:52:03 crc kubenswrapper[4703]: E0130 12:52:03.087851 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:52:03 crc kubenswrapper[4703]: E0130 12:52:03.087925 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:52:03 crc kubenswrapper[4703]: I0130 12:52:03.360603 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4xw89" event={"ID":"f387c846-3b7c-4333-a010-4e714b32392e","Type":"ContainerStarted","Data":"d39ac038c15870b078945d0acd94fde422d087c34eee02a59335991597f7ee01"} Jan 30 12:52:06 crc kubenswrapper[4703]: I0130 12:52:06.395212 4703 generic.go:334] "Generic (PLEG): container finished" podID="f387c846-3b7c-4333-a010-4e714b32392e" containerID="d39ac038c15870b078945d0acd94fde422d087c34eee02a59335991597f7ee01" exitCode=0 Jan 30 12:52:06 crc kubenswrapper[4703]: I0130 12:52:06.396011 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4xw89" 
event={"ID":"f387c846-3b7c-4333-a010-4e714b32392e","Type":"ContainerDied","Data":"d39ac038c15870b078945d0acd94fde422d087c34eee02a59335991597f7ee01"} Jan 30 12:52:07 crc kubenswrapper[4703]: I0130 12:52:07.683943 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4xw89" event={"ID":"f387c846-3b7c-4333-a010-4e714b32392e","Type":"ContainerStarted","Data":"a8cd40c06cef5b101e8aca4fbefd72d0f0b72483c0954929f725c1b901fe4011"} Jan 30 12:52:07 crc kubenswrapper[4703]: I0130 12:52:07.727644 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4xw89" podStartSLOduration=3.290285066 podStartE2EDuration="7.72761045s" podCreationTimestamp="2026-01-30 12:52:00 +0000 UTC" firstStartedPulling="2026-01-30 12:52:02.349827602 +0000 UTC m=+3358.127649256" lastFinishedPulling="2026-01-30 12:52:06.787152996 +0000 UTC m=+3362.564974640" observedRunningTime="2026-01-30 12:52:07.715872898 +0000 UTC m=+3363.493694552" watchObservedRunningTime="2026-01-30 12:52:07.72761045 +0000 UTC m=+3363.505432104" Jan 30 12:52:11 crc kubenswrapper[4703]: I0130 12:52:11.105703 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4xw89" Jan 30 12:52:11 crc kubenswrapper[4703]: I0130 12:52:11.106479 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4xw89" Jan 30 12:52:12 crc kubenswrapper[4703]: I0130 12:52:12.196596 4703 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4xw89" podUID="f387c846-3b7c-4333-a010-4e714b32392e" containerName="registry-server" probeResult="failure" output=< Jan 30 12:52:12 crc kubenswrapper[4703]: timeout: failed to connect service ":50051" within 1s Jan 30 12:52:12 crc kubenswrapper[4703]: > Jan 30 12:52:17 crc kubenswrapper[4703]: I0130 12:52:17.087648 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:52:17 crc kubenswrapper[4703]: E0130 12:52:17.088855 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:52:18 crc kubenswrapper[4703]: I0130 12:52:18.087958 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:52:18 crc kubenswrapper[4703]: E0130 12:52:18.088717 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:52:21 crc kubenswrapper[4703]: I0130 12:52:21.170828 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4xw89" Jan 30 12:52:21 crc kubenswrapper[4703]: I0130 12:52:21.241491 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4xw89" Jan 30 
12:52:21 crc kubenswrapper[4703]: I0130 12:52:21.431153 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4xw89"] Jan 30 12:52:22 crc kubenswrapper[4703]: I0130 12:52:22.697370 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4xw89" podUID="f387c846-3b7c-4333-a010-4e714b32392e" containerName="registry-server" containerID="cri-o://a8cd40c06cef5b101e8aca4fbefd72d0f0b72483c0954929f725c1b901fe4011" gracePeriod=2 Jan 30 12:52:23 crc kubenswrapper[4703]: I0130 12:52:23.326961 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4xw89" Jan 30 12:52:23 crc kubenswrapper[4703]: I0130 12:52:23.802511 4703 generic.go:334] "Generic (PLEG): container finished" podID="f387c846-3b7c-4333-a010-4e714b32392e" containerID="a8cd40c06cef5b101e8aca4fbefd72d0f0b72483c0954929f725c1b901fe4011" exitCode=0 Jan 30 12:52:23 crc kubenswrapper[4703]: I0130 12:52:23.802576 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4xw89" event={"ID":"f387c846-3b7c-4333-a010-4e714b32392e","Type":"ContainerDied","Data":"a8cd40c06cef5b101e8aca4fbefd72d0f0b72483c0954929f725c1b901fe4011"} Jan 30 12:52:23 crc kubenswrapper[4703]: I0130 12:52:23.802617 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4xw89" event={"ID":"f387c846-3b7c-4333-a010-4e714b32392e","Type":"ContainerDied","Data":"55f7d24e773432cc9c3d80f412fd5abaaa549ec162987bafea04905bee8ebb05"} Jan 30 12:52:23 crc kubenswrapper[4703]: I0130 12:52:23.802627 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4xw89" Jan 30 12:52:23 crc kubenswrapper[4703]: I0130 12:52:23.802651 4703 scope.go:117] "RemoveContainer" containerID="a8cd40c06cef5b101e8aca4fbefd72d0f0b72483c0954929f725c1b901fe4011" Jan 30 12:52:23 crc kubenswrapper[4703]: I0130 12:52:23.845321 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6752m\" (UniqueName: \"kubernetes.io/projected/f387c846-3b7c-4333-a010-4e714b32392e-kube-api-access-6752m\") pod \"f387c846-3b7c-4333-a010-4e714b32392e\" (UID: \"f387c846-3b7c-4333-a010-4e714b32392e\") " Jan 30 12:52:23 crc kubenswrapper[4703]: I0130 12:52:23.845756 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f387c846-3b7c-4333-a010-4e714b32392e-utilities\") pod \"f387c846-3b7c-4333-a010-4e714b32392e\" (UID: \"f387c846-3b7c-4333-a010-4e714b32392e\") " Jan 30 12:52:23 crc kubenswrapper[4703]: I0130 12:52:23.845889 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f387c846-3b7c-4333-a010-4e714b32392e-catalog-content\") pod \"f387c846-3b7c-4333-a010-4e714b32392e\" (UID: \"f387c846-3b7c-4333-a010-4e714b32392e\") " Jan 30 12:52:23 crc kubenswrapper[4703]: I0130 12:52:23.848868 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f387c846-3b7c-4333-a010-4e714b32392e-utilities" (OuterVolumeSpecName: "utilities") pod "f387c846-3b7c-4333-a010-4e714b32392e" (UID: "f387c846-3b7c-4333-a010-4e714b32392e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:52:23 crc kubenswrapper[4703]: I0130 12:52:23.849289 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f387c846-3b7c-4333-a010-4e714b32392e-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:52:23 crc kubenswrapper[4703]: I0130 12:52:23.852566 4703 scope.go:117] "RemoveContainer" containerID="d39ac038c15870b078945d0acd94fde422d087c34eee02a59335991597f7ee01" Jan 30 12:52:23 crc kubenswrapper[4703]: I0130 12:52:23.865867 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f387c846-3b7c-4333-a010-4e714b32392e-kube-api-access-6752m" (OuterVolumeSpecName: "kube-api-access-6752m") pod "f387c846-3b7c-4333-a010-4e714b32392e" (UID: "f387c846-3b7c-4333-a010-4e714b32392e"). InnerVolumeSpecName "kube-api-access-6752m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:52:23 crc kubenswrapper[4703]: I0130 12:52:23.964529 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6752m\" (UniqueName: \"kubernetes.io/projected/f387c846-3b7c-4333-a010-4e714b32392e-kube-api-access-6752m\") on node \"crc\" DevicePath \"\"" Jan 30 12:52:23 crc kubenswrapper[4703]: I0130 12:52:23.994664 4703 scope.go:117] "RemoveContainer" containerID="a1ec120192d29d2a07f60a285592fbb37138c971a4a65d9db066ce22a6e9f1e4" Jan 30 12:52:24 crc kubenswrapper[4703]: I0130 12:52:24.019954 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f387c846-3b7c-4333-a010-4e714b32392e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f387c846-3b7c-4333-a010-4e714b32392e" (UID: "f387c846-3b7c-4333-a010-4e714b32392e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:52:24 crc kubenswrapper[4703]: I0130 12:52:24.067358 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f387c846-3b7c-4333-a010-4e714b32392e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:52:24 crc kubenswrapper[4703]: I0130 12:52:24.069965 4703 scope.go:117] "RemoveContainer" containerID="a8cd40c06cef5b101e8aca4fbefd72d0f0b72483c0954929f725c1b901fe4011" Jan 30 12:52:24 crc kubenswrapper[4703]: E0130 12:52:24.072267 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8cd40c06cef5b101e8aca4fbefd72d0f0b72483c0954929f725c1b901fe4011\": container with ID starting with a8cd40c06cef5b101e8aca4fbefd72d0f0b72483c0954929f725c1b901fe4011 not found: ID does not exist" containerID="a8cd40c06cef5b101e8aca4fbefd72d0f0b72483c0954929f725c1b901fe4011" Jan 30 12:52:24 crc kubenswrapper[4703]: I0130 12:52:24.072319 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8cd40c06cef5b101e8aca4fbefd72d0f0b72483c0954929f725c1b901fe4011"} err="failed to get container status \"a8cd40c06cef5b101e8aca4fbefd72d0f0b72483c0954929f725c1b901fe4011\": rpc error: code = NotFound desc = could not find container \"a8cd40c06cef5b101e8aca4fbefd72d0f0b72483c0954929f725c1b901fe4011\": container with ID starting with a8cd40c06cef5b101e8aca4fbefd72d0f0b72483c0954929f725c1b901fe4011 not found: ID does not exist" Jan 30 12:52:24 crc kubenswrapper[4703]: I0130 12:52:24.072351 4703 scope.go:117] "RemoveContainer" containerID="d39ac038c15870b078945d0acd94fde422d087c34eee02a59335991597f7ee01" Jan 30 12:52:24 crc kubenswrapper[4703]: E0130 12:52:24.072722 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d39ac038c15870b078945d0acd94fde422d087c34eee02a59335991597f7ee01\": container with ID starting with d39ac038c15870b078945d0acd94fde422d087c34eee02a59335991597f7ee01 not found: ID does not exist" containerID="d39ac038c15870b078945d0acd94fde422d087c34eee02a59335991597f7ee01" Jan 30 12:52:24 crc kubenswrapper[4703]: I0130 12:52:24.072756 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d39ac038c15870b078945d0acd94fde422d087c34eee02a59335991597f7ee01"} err="failed to get container status \"d39ac038c15870b078945d0acd94fde422d087c34eee02a59335991597f7ee01\": rpc error: code = NotFound desc = could not find container \"d39ac038c15870b078945d0acd94fde422d087c34eee02a59335991597f7ee01\": container with ID starting with d39ac038c15870b078945d0acd94fde422d087c34eee02a59335991597f7ee01 not found: ID does not exist" Jan 30 12:52:24 crc kubenswrapper[4703]: I0130 12:52:24.072783 4703 scope.go:117] "RemoveContainer" containerID="a1ec120192d29d2a07f60a285592fbb37138c971a4a65d9db066ce22a6e9f1e4" Jan 30 12:52:24 crc kubenswrapper[4703]: E0130 12:52:24.080866 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1ec120192d29d2a07f60a285592fbb37138c971a4a65d9db066ce22a6e9f1e4\": container with ID starting with a1ec120192d29d2a07f60a285592fbb37138c971a4a65d9db066ce22a6e9f1e4 not found: ID does not exist" containerID="a1ec120192d29d2a07f60a285592fbb37138c971a4a65d9db066ce22a6e9f1e4" Jan 30 12:52:24 crc kubenswrapper[4703]: I0130 12:52:24.080916 4703 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1ec120192d29d2a07f60a285592fbb37138c971a4a65d9db066ce22a6e9f1e4"} err="failed to get container status \"a1ec120192d29d2a07f60a285592fbb37138c971a4a65d9db066ce22a6e9f1e4\": rpc error: code = NotFound desc = could not find container \"a1ec120192d29d2a07f60a285592fbb37138c971a4a65d9db066ce22a6e9f1e4\": container with ID starting with a1ec120192d29d2a07f60a285592fbb37138c971a4a65d9db066ce22a6e9f1e4 not found: ID does not exist" Jan 30 12:52:24 crc kubenswrapper[4703]: I0130 12:52:24.154191 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4xw89"] Jan 30 12:52:24 crc kubenswrapper[4703]: I0130 12:52:24.163048 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4xw89"] Jan 30 12:52:25 crc kubenswrapper[4703]: I0130 12:52:25.102700 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f387c846-3b7c-4333-a010-4e714b32392e" path="/var/lib/kubelet/pods/f387c846-3b7c-4333-a010-4e714b32392e/volumes" Jan 30 12:52:25 crc kubenswrapper[4703]: I0130 12:52:25.886634 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b6c4d8c5f-2l6qz_7434f3b6-d77f-48b7-8ceb-f084a9c283f3/manager/0.log" Jan 30 12:52:26 crc kubenswrapper[4703]: I0130 12:52:26.041640 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc_0d0ed19e-733e-4b63-923f-426ecea8ffe1/util/0.log" Jan 30 12:52:26 crc kubenswrapper[4703]: I0130 12:52:26.910760 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc_0d0ed19e-733e-4b63-923f-426ecea8ffe1/pull/0.log" Jan 30 12:52:26 crc kubenswrapper[4703]: I0130 12:52:26.951133 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc_0d0ed19e-733e-4b63-923f-426ecea8ffe1/util/0.log" Jan 30 12:52:26 crc kubenswrapper[4703]: I0130 12:52:26.998883 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc_0d0ed19e-733e-4b63-923f-426ecea8ffe1/pull/0.log" Jan 30 12:52:27 crc kubenswrapper[4703]: I0130 12:52:27.197172 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc_0d0ed19e-733e-4b63-923f-426ecea8ffe1/util/0.log" Jan 30 12:52:27 crc kubenswrapper[4703]: I0130 12:52:27.218008 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc_0d0ed19e-733e-4b63-923f-426ecea8ffe1/extract/0.log" Jan 30 12:52:27 crc kubenswrapper[4703]: I0130 12:52:27.228540 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ce3d6151ccd093ea87125b9ed7c7ab7983f2432d0cbc5f05fa26213628qxskc_0d0ed19e-733e-4b63-923f-426ecea8ffe1/pull/0.log" Jan 30 12:52:27 crc kubenswrapper[4703]: I0130 12:52:27.457674 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-6d9697b7f4-wxxzb_918fbb0a-6011-4785-8b99-c69ef91af7ef/manager/0.log" Jan 30 12:52:27 crc kubenswrapper[4703]: I0130 12:52:27.479280 4703 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-8d874c8fc-pdt6v_6219460f-6432-4743-984d-e3c0ce8d4538/manager/0.log" Jan 30 12:52:27 crc kubenswrapper[4703]: I0130 12:52:27.738775 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-69d6db494d-fzxq8_2f2ed0c3-f32f-4402-9e49-3ef2c200c73a/manager/0.log" Jan 30 12:52:27 crc kubenswrapper[4703]: I0130 12:52:27.800813 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-8886f4c47-vb4kb_a634d819-9927-4b82-8ec1-959ca5f19908/manager/0.log" Jan 30 12:52:28 crc kubenswrapper[4703]: I0130 12:52:28.020305 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5fb775575f-q26rs_61661318-fcab-41f8-a672-7fe2b6cfa1ce/manager/0.log" Jan 30 12:52:28 crc kubenswrapper[4703]: I0130 12:52:28.382069 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5f4b8bd54d-p245s_0e5cd4fd-45a1-4cdb-9317-35abf01f5c33/manager/0.log" Jan 30 12:52:28 crc kubenswrapper[4703]: I0130 12:52:28.383879 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-79955696d6-z2td9_fd6c3383-802c-4e61-9dba-3f691d8d0fbc/manager/0.log" Jan 30 12:52:28 crc kubenswrapper[4703]: I0130 12:52:28.611543 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-84f48565d4-xv5lq_9cc112c6-c3ca-4d9c-ab24-178578e1a41f/manager/0.log" Jan 30 12:52:28 crc kubenswrapper[4703]: I0130 12:52:28.638693 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-7dd968899f-sbcr8_d09ba286-529c-4d2e-b56a-7c8efaff7fec/manager/0.log" Jan 30 12:52:28 crc kubenswrapper[4703]: I0130 12:52:28.870850 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-67bf948998-zzdzn_bb4211ca-5463-47ec-851d-86f23ff74397/manager/0.log" Jan 30 12:52:28 crc kubenswrapper[4703]: I0130 12:52:28.954864 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-585dbc889-w9kvh_a530abd9-3d62-4c65-b9e8-190208cbefd4/manager/0.log" Jan 30 12:52:29 crc kubenswrapper[4703]: I0130 12:52:29.265860 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-6687f8d877-nxm9c_31df4dd4-aab4-4e73-9229-c78a40e82eda/manager/0.log" Jan 30 12:52:29 crc kubenswrapper[4703]: I0130 12:52:29.266435 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-55bff696bd-x7cwt_e0183650-4de1-4a80-a310-3313864fae9b/manager/0.log" Jan 30 12:52:29 crc kubenswrapper[4703]: I0130 12:52:29.447192 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-59c4b45c4d26bq2_98241c60-d78a-4a93-bfb8-a061e65c7c83/manager/0.log" Jan 30 12:52:30 crc kubenswrapper[4703]: I0130 12:52:30.097548 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-c759d5c99-j4njr_83a386a8-7fbe-48ca-b837-731a4fd64c57/operator/0.log" Jan 30 12:52:30 crc kubenswrapper[4703]: I0130 12:52:30.841931 4703 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_openstack-operator-index-nfflc_2dd61d9e-b40e-44fe-84bc-9be8323b133a/registry-server/0.log" Jan 30 12:52:31 crc kubenswrapper[4703]: I0130 12:52:31.087408 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:52:31 crc kubenswrapper[4703]: E0130 12:52:31.087796 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:52:31 crc kubenswrapper[4703]: I0130 12:52:31.096529 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-788c46999f-zqh8w_baed05bb-6058-489e-b1ad-424333b94494/manager/0.log" Jan 30 12:52:31 crc kubenswrapper[4703]: I0130 12:52:31.303015 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b964cf4cd-qzmpv_2d50762e-d50c-4448-bb9c-19136516a622/manager/0.log" Jan 30 12:52:31 crc kubenswrapper[4703]: I0130 12:52:31.491398 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-mfgtf_dfa2e6c9-10c2-48ee-8aed-315d5bf5b1c5/operator/0.log" Jan 30 12:52:31 crc kubenswrapper[4703]: I0130 12:52:31.752608 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-68fc8c869-hw7pr_f1787cd3-2c84-43bc-87dd-5356e44ba9cd/manager/0.log" Jan 30 12:52:32 crc kubenswrapper[4703]: I0130 12:52:32.079644 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-56f8bfcd9f-x2z8q_842c0b1e-43ca-49b3-bcc0-a5f9714773ac/manager/0.log" Jan 30 12:52:32 crc kubenswrapper[4703]: I0130 12:52:32.081715 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-64b5b76f97-cwdkz_425e555c-7283-4169-9293-e380104d38ca/manager/0.log" Jan 30 12:52:32 crc kubenswrapper[4703]: I0130 12:52:32.086708 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:52:32 crc kubenswrapper[4703]: E0130 12:52:32.087180 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:52:32 crc kubenswrapper[4703]: I0130 12:52:32.273260 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-6987d5d556-cff79_9d4e1655-d610-4867-b435-4ec960bb483c/manager/0.log" Jan 30 12:52:32 crc kubenswrapper[4703]: I0130 12:52:32.421320 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-79c984db64-qk88l_7f8888ec-800b-41b1-a297-576a28957668/manager/0.log" Jan 30 12:52:42 crc kubenswrapper[4703]: I0130 12:52:42.190391 4703 scope.go:117] "RemoveContainer" 
containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:52:42 crc kubenswrapper[4703]: E0130 12:52:42.191826 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:52:47 crc kubenswrapper[4703]: I0130 12:52:47.087482 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:52:47 crc kubenswrapper[4703]: E0130 12:52:47.088827 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:52:53 crc kubenswrapper[4703]: I0130 12:52:53.087489 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:52:53 crc kubenswrapper[4703]: E0130 12:52:53.088480 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:53:00 crc kubenswrapper[4703]: I0130 12:53:00.087898 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:53:00 crc kubenswrapper[4703]: E0130 12:53:00.089078 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:53:00 crc kubenswrapper[4703]: I0130 12:53:00.485598 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-92h6w_40e464c0-96c5-4c69-8537-ef29b93319ab/control-plane-machine-set-operator/0.log" Jan 30 12:53:00 crc kubenswrapper[4703]: I0130 12:53:00.822452 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-x5w7b_8ff6e057-d092-41f8-908a-7f718f8e7813/kube-rbac-proxy/0.log" Jan 30 12:53:00 crc kubenswrapper[4703]: I0130 12:53:00.822859 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-x5w7b_8ff6e057-d092-41f8-908a-7f718f8e7813/machine-api-operator/0.log" Jan 30 12:53:07 crc kubenswrapper[4703]: I0130 12:53:07.087598 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:53:07 crc kubenswrapper[4703]: E0130 12:53:07.088516 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:53:14 crc kubenswrapper[4703]: I0130 12:53:14.087300 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:53:14 crc kubenswrapper[4703]: E0130 12:53:14.088498 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:53:19 crc kubenswrapper[4703]: I0130 12:53:19.377158 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-tfrws_86682bcd-a9b5-485b-b91d-81b53fa2536e/cert-manager-controller/0.log" Jan 30 12:53:19 crc kubenswrapper[4703]: I0130 12:53:19.694613 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-lxbpt_bba7ab08-ac7f-40d7-9da9-61da28bf3023/cert-manager-cainjector/0.log" Jan 30 12:53:19 crc kubenswrapper[4703]: I0130 12:53:19.747480 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-fpdbd_60c1cbf2-1a9e-481a-b264-5f3cd8536c08/cert-manager-webhook/0.log" Jan 30 12:53:21 crc kubenswrapper[4703]: I0130 12:53:21.086799 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:53:21 crc kubenswrapper[4703]: E0130 12:53:21.087793 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:53:26 crc kubenswrapper[4703]: I0130 12:53:26.086918 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:53:26 crc kubenswrapper[4703]: E0130 12:53:26.088595 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:53:32 crc kubenswrapper[4703]: I0130 12:53:32.086302 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:53:32 crc kubenswrapper[4703]: E0130 12:53:32.087091 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 12:53:35 crc kubenswrapper[4703]: I0130 12:53:35.724895 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-82sv7"] Jan 30 12:53:35 crc kubenswrapper[4703]: E0130 12:53:35.725884 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f387c846-3b7c-4333-a010-4e714b32392e" containerName="extract-content" Jan 30 12:53:35 crc kubenswrapper[4703]: I0130 12:53:35.725926 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f387c846-3b7c-4333-a010-4e714b32392e" containerName="extract-content" Jan 30 12:53:35 crc kubenswrapper[4703]: E0130 12:53:35.725959 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f387c846-3b7c-4333-a010-4e714b32392e" containerName="registry-server" Jan 30 12:53:35 crc kubenswrapper[4703]: I0130 12:53:35.725966 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f387c846-3b7c-4333-a010-4e714b32392e" containerName="registry-server" Jan 30 12:53:35 crc kubenswrapper[4703]: E0130 12:53:35.725979 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f387c846-3b7c-4333-a010-4e714b32392e" containerName="extract-utilities" Jan 30 12:53:35 crc kubenswrapper[4703]: I0130 12:53:35.725987 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="f387c846-3b7c-4333-a010-4e714b32392e" containerName="extract-utilities" Jan 30 12:53:35 crc kubenswrapper[4703]: I0130 12:53:35.726255 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="f387c846-3b7c-4333-a010-4e714b32392e" containerName="registry-server" Jan 30 12:53:35 crc kubenswrapper[4703]: I0130 12:53:35.728365 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-82sv7" Jan 30 12:53:35 crc kubenswrapper[4703]: I0130 12:53:35.743794 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-82sv7"] Jan 30 12:53:35 crc kubenswrapper[4703]: I0130 12:53:35.860974 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d945360-fc62-47e9-91ce-6ec9cae3d87e-catalog-content\") pod \"community-operators-82sv7\" (UID: \"4d945360-fc62-47e9-91ce-6ec9cae3d87e\") " pod="openshift-marketplace/community-operators-82sv7" Jan 30 12:53:35 crc kubenswrapper[4703]: I0130 12:53:35.861034 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d945360-fc62-47e9-91ce-6ec9cae3d87e-utilities\") pod \"community-operators-82sv7\" (UID: \"4d945360-fc62-47e9-91ce-6ec9cae3d87e\") " pod="openshift-marketplace/community-operators-82sv7" Jan 30 12:53:35 crc kubenswrapper[4703]: I0130 12:53:35.861296 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pqx5\" (UniqueName: \"kubernetes.io/projected/4d945360-fc62-47e9-91ce-6ec9cae3d87e-kube-api-access-7pqx5\") pod \"community-operators-82sv7\" (UID: \"4d945360-fc62-47e9-91ce-6ec9cae3d87e\") " pod="openshift-marketplace/community-operators-82sv7" Jan 30 12:53:35 crc kubenswrapper[4703]: I0130 12:53:35.964938 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d945360-fc62-47e9-91ce-6ec9cae3d87e-catalog-content\") pod \"community-operators-82sv7\" (UID: \"4d945360-fc62-47e9-91ce-6ec9cae3d87e\") " pod="openshift-marketplace/community-operators-82sv7" Jan 30 12:53:35 crc kubenswrapper[4703]: I0130 12:53:35.965364 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d945360-fc62-47e9-91ce-6ec9cae3d87e-utilities\") pod \"community-operators-82sv7\" (UID: \"4d945360-fc62-47e9-91ce-6ec9cae3d87e\") " pod="openshift-marketplace/community-operators-82sv7" Jan 30 12:53:35 crc kubenswrapper[4703]: I0130 12:53:35.965452 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pqx5\" (UniqueName: \"kubernetes.io/projected/4d945360-fc62-47e9-91ce-6ec9cae3d87e-kube-api-access-7pqx5\") pod \"community-operators-82sv7\" (UID: \"4d945360-fc62-47e9-91ce-6ec9cae3d87e\") " pod="openshift-marketplace/community-operators-82sv7" Jan 30 12:53:35 crc kubenswrapper[4703]: I0130 12:53:35.965683 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d945360-fc62-47e9-91ce-6ec9cae3d87e-catalog-content\") pod \"community-operators-82sv7\" (UID: \"4d945360-fc62-47e9-91ce-6ec9cae3d87e\") " pod="openshift-marketplace/community-operators-82sv7" Jan 30 12:53:35 crc kubenswrapper[4703]: I0130 12:53:35.966042 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d945360-fc62-47e9-91ce-6ec9cae3d87e-utilities\") pod \"community-operators-82sv7\" (UID: \"4d945360-fc62-47e9-91ce-6ec9cae3d87e\") " pod="openshift-marketplace/community-operators-82sv7" Jan 30 12:53:35 crc kubenswrapper[4703]: I0130 12:53:35.993775 4703 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-7pqx5\" (UniqueName: \"kubernetes.io/projected/4d945360-fc62-47e9-91ce-6ec9cae3d87e-kube-api-access-7pqx5\") pod \"community-operators-82sv7\" (UID: \"4d945360-fc62-47e9-91ce-6ec9cae3d87e\") " pod="openshift-marketplace/community-operators-82sv7" Jan 30 12:53:36 crc kubenswrapper[4703]: I0130 12:53:36.055407 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-82sv7" Jan 30 12:53:36 crc kubenswrapper[4703]: W0130 12:53:36.881627 4703 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4d945360_fc62_47e9_91ce_6ec9cae3d87e.slice/crio-5f5fa0976bd4832d39b46cfa59252f334663bd24e12fd1031642ce20040a2f91 WatchSource:0}: Error finding container 5f5fa0976bd4832d39b46cfa59252f334663bd24e12fd1031642ce20040a2f91: Status 404 returned error can't find the container with id 5f5fa0976bd4832d39b46cfa59252f334663bd24e12fd1031642ce20040a2f91 Jan 30 12:53:36 crc kubenswrapper[4703]: I0130 12:53:36.900398 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-82sv7"] Jan 30 12:53:37 crc kubenswrapper[4703]: I0130 12:53:37.396830 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-4qn59_ca06ad3d-27fa-4486-a6a6-2eeed1619033/nmstate-console-plugin/0.log" Jan 30 12:53:37 crc kubenswrapper[4703]: I0130 12:53:37.561705 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-8ltjk_df161a1d-9b81-4b35-82b8-2efb65b490d4/nmstate-handler/0.log" Jan 30 12:53:37 crc kubenswrapper[4703]: I0130 12:53:37.564293 4703 generic.go:334] "Generic (PLEG): container finished" podID="4d945360-fc62-47e9-91ce-6ec9cae3d87e" containerID="4106830eaff6f99964a839509fcffb8af496b9b1e5628c6bb3348bbec5cad824" exitCode=0 Jan 30 12:53:37 crc kubenswrapper[4703]: I0130 12:53:37.564355 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-82sv7" event={"ID":"4d945360-fc62-47e9-91ce-6ec9cae3d87e","Type":"ContainerDied","Data":"4106830eaff6f99964a839509fcffb8af496b9b1e5628c6bb3348bbec5cad824"} Jan 30 12:53:37 crc kubenswrapper[4703]: I0130 12:53:37.564398 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-82sv7" event={"ID":"4d945360-fc62-47e9-91ce-6ec9cae3d87e","Type":"ContainerStarted","Data":"5f5fa0976bd4832d39b46cfa59252f334663bd24e12fd1031642ce20040a2f91"} Jan 30 12:53:37 crc kubenswrapper[4703]: I0130 12:53:37.567714 4703 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 30 12:53:37 crc kubenswrapper[4703]: I0130 12:53:37.678792 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-rldth_f2c5f325-b450-4ffb-b23e-fb7322f9aad0/kube-rbac-proxy/0.log" Jan 30 12:53:37 crc kubenswrapper[4703]: I0130 12:53:37.801064 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-rldth_f2c5f325-b450-4ffb-b23e-fb7322f9aad0/nmstate-metrics/0.log" Jan 30 12:53:37 crc kubenswrapper[4703]: I0130 12:53:37.863880 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-gzrj2_c26bacd3-3ee7-4276-b58d-a3213c1d8cd0/nmstate-operator/0.log" Jan 30 12:53:38 crc kubenswrapper[4703]: I0130 12:53:38.048508 4703 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-jndjq_affa004f-4f06-4a2c-9e91-3ac54a7e5a4f/nmstate-webhook/0.log" Jan 30 12:53:38 crc kubenswrapper[4703]: I0130 12:53:38.086934 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:53:38 crc kubenswrapper[4703]: E0130 12:53:38.087297 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:53:38 crc kubenswrapper[4703]: I0130 12:53:38.579025 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-82sv7" event={"ID":"4d945360-fc62-47e9-91ce-6ec9cae3d87e","Type":"ContainerStarted","Data":"6b9d67fb327f2d53ee3f2d25c8ffda79bfdae2ffc1b48caeb71c82796d9d1d4f"} Jan 30 12:53:41 crc kubenswrapper[4703]: I0130 12:53:41.719245 4703 generic.go:334] "Generic (PLEG): container finished" podID="4d945360-fc62-47e9-91ce-6ec9cae3d87e" containerID="6b9d67fb327f2d53ee3f2d25c8ffda79bfdae2ffc1b48caeb71c82796d9d1d4f" exitCode=0 Jan 30 12:53:41 crc kubenswrapper[4703]: I0130 12:53:41.719348 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-82sv7" event={"ID":"4d945360-fc62-47e9-91ce-6ec9cae3d87e","Type":"ContainerDied","Data":"6b9d67fb327f2d53ee3f2d25c8ffda79bfdae2ffc1b48caeb71c82796d9d1d4f"} Jan 30 12:53:42 crc kubenswrapper[4703]: I0130 12:53:42.736712 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-82sv7" event={"ID":"4d945360-fc62-47e9-91ce-6ec9cae3d87e","Type":"ContainerStarted","Data":"440338312b6bd974a53c98467fd1e0cbd05141a80d20b2c860bd03fb068ab5af"} Jan 30 12:53:42 crc kubenswrapper[4703]: I0130 12:53:42.766021 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-82sv7" podStartSLOduration=3.176916242 podStartE2EDuration="7.765974246s" podCreationTimestamp="2026-01-30 12:53:35 +0000 UTC" firstStartedPulling="2026-01-30 12:53:37.567356568 +0000 UTC m=+3453.345178222" lastFinishedPulling="2026-01-30 12:53:42.156414572 +0000 UTC m=+3457.934236226" observedRunningTime="2026-01-30 12:53:42.759100293 +0000 UTC m=+3458.536921947" watchObservedRunningTime="2026-01-30 12:53:42.765974246 +0000 UTC m=+3458.543795900" Jan 30 12:53:45 crc kubenswrapper[4703]: I0130 12:53:45.095389 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:53:45 crc kubenswrapper[4703]: I0130 12:53:45.777751 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerStarted","Data":"6b57ab84336d4fe8c2455961ec7c9b8c7fd3b27d7b48885d7b70a5e429c4b7d2"} Jan 30 12:53:46 crc kubenswrapper[4703]: I0130 12:53:46.055868 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-82sv7" Jan 30 12:53:46 crc kubenswrapper[4703]: I0130 12:53:46.056442 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-82sv7" Jan 30 12:53:46 crc 
kubenswrapper[4703]: I0130 12:53:46.118834 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-82sv7" Jan 30 12:53:53 crc kubenswrapper[4703]: I0130 12:53:53.087587 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:53:53 crc kubenswrapper[4703]: E0130 12:53:53.088374 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:53:54 crc kubenswrapper[4703]: I0130 12:53:54.924500 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-2dr52_1bba418c-e14f-4710-b936-9431dee41382/prometheus-operator/0.log" Jan 30 12:53:55 crc kubenswrapper[4703]: I0130 12:53:55.128694 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w_51d9f47f-4fa1-4632-be7a-f92fcb054ae6/prometheus-operator-admission-webhook/0.log" Jan 30 12:53:55 crc kubenswrapper[4703]: I0130 12:53:55.216607 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4_f5d72830-ff3a-4578-9e66-c738cbf1a7cf/prometheus-operator-admission-webhook/0.log" Jan 30 12:53:55 crc kubenswrapper[4703]: I0130 12:53:55.375861 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-skwqz_4bd5df43-64da-4ab9-8006-83056b6d0d5e/operator/0.log" Jan 30 12:53:55 crc kubenswrapper[4703]: I0130 12:53:55.464536 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-n9zw2_74ad7d1c-6e7a-4f7e-a441-e301ec3593ab/perses-operator/0.log" Jan 30 12:53:56 crc kubenswrapper[4703]: I0130 12:53:56.114104 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-82sv7" Jan 30 12:53:56 crc kubenswrapper[4703]: I0130 12:53:56.179184 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-82sv7"] Jan 30 12:53:56 crc kubenswrapper[4703]: I0130 12:53:56.910040 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-82sv7" podUID="4d945360-fc62-47e9-91ce-6ec9cae3d87e" containerName="registry-server" containerID="cri-o://440338312b6bd974a53c98467fd1e0cbd05141a80d20b2c860bd03fb068ab5af" gracePeriod=2 Jan 30 12:53:57 crc kubenswrapper[4703]: I0130 12:53:57.405768 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-82sv7" Jan 30 12:53:57 crc kubenswrapper[4703]: I0130 12:53:57.461442 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d945360-fc62-47e9-91ce-6ec9cae3d87e-catalog-content\") pod \"4d945360-fc62-47e9-91ce-6ec9cae3d87e\" (UID: \"4d945360-fc62-47e9-91ce-6ec9cae3d87e\") " Jan 30 12:53:57 crc kubenswrapper[4703]: I0130 12:53:57.461580 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d945360-fc62-47e9-91ce-6ec9cae3d87e-utilities\") pod \"4d945360-fc62-47e9-91ce-6ec9cae3d87e\" (UID: \"4d945360-fc62-47e9-91ce-6ec9cae3d87e\") " Jan 30 12:53:57 crc kubenswrapper[4703]: I0130 12:53:57.461642 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7pqx5\" (UniqueName: \"kubernetes.io/projected/4d945360-fc62-47e9-91ce-6ec9cae3d87e-kube-api-access-7pqx5\") pod \"4d945360-fc62-47e9-91ce-6ec9cae3d87e\" (UID: \"4d945360-fc62-47e9-91ce-6ec9cae3d87e\") " Jan 30 12:53:57 crc kubenswrapper[4703]: I0130 12:53:57.462663 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d945360-fc62-47e9-91ce-6ec9cae3d87e-utilities" (OuterVolumeSpecName: "utilities") pod "4d945360-fc62-47e9-91ce-6ec9cae3d87e" (UID: "4d945360-fc62-47e9-91ce-6ec9cae3d87e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:53:57 crc kubenswrapper[4703]: I0130 12:53:57.487617 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d945360-fc62-47e9-91ce-6ec9cae3d87e-kube-api-access-7pqx5" (OuterVolumeSpecName: "kube-api-access-7pqx5") pod "4d945360-fc62-47e9-91ce-6ec9cae3d87e" (UID: "4d945360-fc62-47e9-91ce-6ec9cae3d87e"). InnerVolumeSpecName "kube-api-access-7pqx5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:53:57 crc kubenswrapper[4703]: I0130 12:53:57.528319 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d945360-fc62-47e9-91ce-6ec9cae3d87e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4d945360-fc62-47e9-91ce-6ec9cae3d87e" (UID: "4d945360-fc62-47e9-91ce-6ec9cae3d87e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:53:57 crc kubenswrapper[4703]: I0130 12:53:57.565044 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d945360-fc62-47e9-91ce-6ec9cae3d87e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:53:57 crc kubenswrapper[4703]: I0130 12:53:57.565103 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d945360-fc62-47e9-91ce-6ec9cae3d87e-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:53:57 crc kubenswrapper[4703]: I0130 12:53:57.565117 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7pqx5\" (UniqueName: \"kubernetes.io/projected/4d945360-fc62-47e9-91ce-6ec9cae3d87e-kube-api-access-7pqx5\") on node \"crc\" DevicePath \"\"" Jan 30 12:53:57 crc kubenswrapper[4703]: I0130 12:53:57.925404 4703 generic.go:334] "Generic (PLEG): container finished" podID="4d945360-fc62-47e9-91ce-6ec9cae3d87e" containerID="440338312b6bd974a53c98467fd1e0cbd05141a80d20b2c860bd03fb068ab5af" exitCode=0 Jan 30 12:53:57 crc kubenswrapper[4703]: I0130 12:53:57.925479 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-82sv7" event={"ID":"4d945360-fc62-47e9-91ce-6ec9cae3d87e","Type":"ContainerDied","Data":"440338312b6bd974a53c98467fd1e0cbd05141a80d20b2c860bd03fb068ab5af"} Jan 30 12:53:57 crc kubenswrapper[4703]: I0130 12:53:57.925525 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-82sv7" event={"ID":"4d945360-fc62-47e9-91ce-6ec9cae3d87e","Type":"ContainerDied","Data":"5f5fa0976bd4832d39b46cfa59252f334663bd24e12fd1031642ce20040a2f91"} Jan 30 12:53:57 crc kubenswrapper[4703]: I0130 12:53:57.925527 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-82sv7" Jan 30 12:53:57 crc kubenswrapper[4703]: I0130 12:53:57.925552 4703 scope.go:117] "RemoveContainer" containerID="440338312b6bd974a53c98467fd1e0cbd05141a80d20b2c860bd03fb068ab5af" Jan 30 12:53:57 crc kubenswrapper[4703]: I0130 12:53:57.965431 4703 scope.go:117] "RemoveContainer" containerID="6b9d67fb327f2d53ee3f2d25c8ffda79bfdae2ffc1b48caeb71c82796d9d1d4f" Jan 30 12:53:57 crc kubenswrapper[4703]: I0130 12:53:57.967294 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-82sv7"] Jan 30 12:53:57 crc kubenswrapper[4703]: I0130 12:53:57.985623 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-82sv7"] Jan 30 12:53:58 crc kubenswrapper[4703]: I0130 12:53:58.002690 4703 scope.go:117] "RemoveContainer" containerID="4106830eaff6f99964a839509fcffb8af496b9b1e5628c6bb3348bbec5cad824" Jan 30 12:53:58 crc kubenswrapper[4703]: I0130 12:53:58.066397 4703 scope.go:117] "RemoveContainer" containerID="440338312b6bd974a53c98467fd1e0cbd05141a80d20b2c860bd03fb068ab5af" Jan 30 12:53:58 crc kubenswrapper[4703]: E0130 12:53:58.068830 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"440338312b6bd974a53c98467fd1e0cbd05141a80d20b2c860bd03fb068ab5af\": container with ID starting with 440338312b6bd974a53c98467fd1e0cbd05141a80d20b2c860bd03fb068ab5af not found: ID does not exist" containerID="440338312b6bd974a53c98467fd1e0cbd05141a80d20b2c860bd03fb068ab5af" Jan 30 12:53:58 crc kubenswrapper[4703]: I0130 12:53:58.068875 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"440338312b6bd974a53c98467fd1e0cbd05141a80d20b2c860bd03fb068ab5af"} err="failed to get container status \"440338312b6bd974a53c98467fd1e0cbd05141a80d20b2c860bd03fb068ab5af\": rpc error: code = NotFound desc = could not find container \"440338312b6bd974a53c98467fd1e0cbd05141a80d20b2c860bd03fb068ab5af\": container with ID starting with 440338312b6bd974a53c98467fd1e0cbd05141a80d20b2c860bd03fb068ab5af not found: ID does not exist" Jan 30 12:53:58 crc kubenswrapper[4703]: I0130 12:53:58.068905 4703 scope.go:117] "RemoveContainer" containerID="6b9d67fb327f2d53ee3f2d25c8ffda79bfdae2ffc1b48caeb71c82796d9d1d4f" Jan 30 12:53:58 crc kubenswrapper[4703]: E0130 12:53:58.069679 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b9d67fb327f2d53ee3f2d25c8ffda79bfdae2ffc1b48caeb71c82796d9d1d4f\": container with ID starting with 6b9d67fb327f2d53ee3f2d25c8ffda79bfdae2ffc1b48caeb71c82796d9d1d4f not found: ID does not exist" containerID="6b9d67fb327f2d53ee3f2d25c8ffda79bfdae2ffc1b48caeb71c82796d9d1d4f" Jan 30 12:53:58 crc kubenswrapper[4703]: I0130 12:53:58.069715 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b9d67fb327f2d53ee3f2d25c8ffda79bfdae2ffc1b48caeb71c82796d9d1d4f"} err="failed to get container status \"6b9d67fb327f2d53ee3f2d25c8ffda79bfdae2ffc1b48caeb71c82796d9d1d4f\": rpc error: code = NotFound desc = could not find container \"6b9d67fb327f2d53ee3f2d25c8ffda79bfdae2ffc1b48caeb71c82796d9d1d4f\": container with ID starting with 6b9d67fb327f2d53ee3f2d25c8ffda79bfdae2ffc1b48caeb71c82796d9d1d4f not found: ID does not exist" Jan 30 12:53:58 crc kubenswrapper[4703]: I0130 12:53:58.069737 4703 scope.go:117] "RemoveContainer" 
containerID="4106830eaff6f99964a839509fcffb8af496b9b1e5628c6bb3348bbec5cad824" Jan 30 12:53:58 crc kubenswrapper[4703]: E0130 12:53:58.070068 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4106830eaff6f99964a839509fcffb8af496b9b1e5628c6bb3348bbec5cad824\": container with ID starting with 4106830eaff6f99964a839509fcffb8af496b9b1e5628c6bb3348bbec5cad824 not found: ID does not exist" containerID="4106830eaff6f99964a839509fcffb8af496b9b1e5628c6bb3348bbec5cad824" Jan 30 12:53:58 crc kubenswrapper[4703]: I0130 12:53:58.070136 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4106830eaff6f99964a839509fcffb8af496b9b1e5628c6bb3348bbec5cad824"} err="failed to get container status \"4106830eaff6f99964a839509fcffb8af496b9b1e5628c6bb3348bbec5cad824\": rpc error: code = NotFound desc = could not find container \"4106830eaff6f99964a839509fcffb8af496b9b1e5628c6bb3348bbec5cad824\": container with ID starting with 4106830eaff6f99964a839509fcffb8af496b9b1e5628c6bb3348bbec5cad824 not found: ID does not exist" Jan 30 12:53:59 crc kubenswrapper[4703]: I0130 12:53:59.101725 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d945360-fc62-47e9-91ce-6ec9cae3d87e" path="/var/lib/kubelet/pods/4d945360-fc62-47e9-91ce-6ec9cae3d87e/volumes" Jan 30 12:54:05 crc kubenswrapper[4703]: I0130 12:54:05.096470 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:54:05 crc kubenswrapper[4703]: E0130 12:54:05.099815 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:54:11 crc kubenswrapper[4703]: I0130 12:54:11.168162 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-mqw8c_a8fcb989-a426-48fb-bf36-bf97b534f004/kube-rbac-proxy/0.log" Jan 30 12:54:11 crc kubenswrapper[4703]: I0130 12:54:11.234478 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-mqw8c_a8fcb989-a426-48fb-bf36-bf97b534f004/controller/0.log" Jan 30 12:54:11 crc kubenswrapper[4703]: I0130 12:54:11.440701 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-krzld_2bb59434-7622-4640-bb0c-28839fa5405c/cp-frr-files/0.log" Jan 30 12:54:11 crc kubenswrapper[4703]: I0130 12:54:11.674202 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-krzld_2bb59434-7622-4640-bb0c-28839fa5405c/cp-frr-files/0.log" Jan 30 12:54:11 crc kubenswrapper[4703]: I0130 12:54:11.676485 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-krzld_2bb59434-7622-4640-bb0c-28839fa5405c/cp-reloader/0.log" Jan 30 12:54:11 crc kubenswrapper[4703]: I0130 12:54:11.698764 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-krzld_2bb59434-7622-4640-bb0c-28839fa5405c/cp-metrics/0.log" Jan 30 12:54:11 crc kubenswrapper[4703]: I0130 12:54:11.748285 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-krzld_2bb59434-7622-4640-bb0c-28839fa5405c/cp-reloader/0.log" Jan 30 12:54:11 crc kubenswrapper[4703]: 
I0130 12:54:11.951399 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-krzld_2bb59434-7622-4640-bb0c-28839fa5405c/cp-metrics/0.log" Jan 30 12:54:11 crc kubenswrapper[4703]: I0130 12:54:11.974131 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-krzld_2bb59434-7622-4640-bb0c-28839fa5405c/cp-reloader/0.log" Jan 30 12:54:11 crc kubenswrapper[4703]: I0130 12:54:11.974131 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-krzld_2bb59434-7622-4640-bb0c-28839fa5405c/cp-frr-files/0.log" Jan 30 12:54:11 crc kubenswrapper[4703]: I0130 12:54:11.976502 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-krzld_2bb59434-7622-4640-bb0c-28839fa5405c/cp-metrics/0.log" Jan 30 12:54:12 crc kubenswrapper[4703]: I0130 12:54:12.219613 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-krzld_2bb59434-7622-4640-bb0c-28839fa5405c/cp-reloader/0.log" Jan 30 12:54:12 crc kubenswrapper[4703]: I0130 12:54:12.240186 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-krzld_2bb59434-7622-4640-bb0c-28839fa5405c/cp-frr-files/0.log" Jan 30 12:54:12 crc kubenswrapper[4703]: I0130 12:54:12.277838 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-krzld_2bb59434-7622-4640-bb0c-28839fa5405c/cp-metrics/0.log" Jan 30 12:54:12 crc kubenswrapper[4703]: I0130 12:54:12.284020 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-krzld_2bb59434-7622-4640-bb0c-28839fa5405c/controller/0.log" Jan 30 12:54:12 crc kubenswrapper[4703]: I0130 12:54:12.449209 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-krzld_2bb59434-7622-4640-bb0c-28839fa5405c/frr-metrics/0.log" Jan 30 12:54:12 crc kubenswrapper[4703]: I0130 12:54:12.479461 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-krzld_2bb59434-7622-4640-bb0c-28839fa5405c/kube-rbac-proxy-frr/0.log" Jan 30 12:54:12 crc kubenswrapper[4703]: I0130 12:54:12.530455 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-krzld_2bb59434-7622-4640-bb0c-28839fa5405c/kube-rbac-proxy/0.log" Jan 30 12:54:12 crc kubenswrapper[4703]: I0130 12:54:12.702389 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-krzld_2bb59434-7622-4640-bb0c-28839fa5405c/reloader/0.log" Jan 30 12:54:12 crc kubenswrapper[4703]: I0130 12:54:12.791772 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-bhs52_fe952b2e-c0d5-4420-a5ba-7128c73c6b3e/frr-k8s-webhook-server/0.log" Jan 30 12:54:13 crc kubenswrapper[4703]: I0130 12:54:13.038972 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-5585c54947-kz2s9_63b2fd5c-5f47-4dcb-bad2-3f370b26a1e8/manager/0.log" Jan 30 12:54:13 crc kubenswrapper[4703]: I0130 12:54:13.208559 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-6dbbf674bb-smzg8_c3323cec-cd09-4ef9-a6b8-7eb5fe2b8715/webhook-server/0.log" Jan 30 12:54:13 crc kubenswrapper[4703]: I0130 12:54:13.333280 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-c59pj_e0696463-355f-411b-89b5-9a0bd3211101/kube-rbac-proxy/0.log" Jan 30 12:54:14 crc kubenswrapper[4703]: I0130 12:54:14.098229 4703 log.go:25] 
"Finished parsing log file" path="/var/log/pods/metallb-system_speaker-c59pj_e0696463-355f-411b-89b5-9a0bd3211101/speaker/0.log" Jan 30 12:54:14 crc kubenswrapper[4703]: I0130 12:54:14.241571 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-krzld_2bb59434-7622-4640-bb0c-28839fa5405c/frr/0.log" Jan 30 12:54:16 crc kubenswrapper[4703]: I0130 12:54:16.086906 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:54:16 crc kubenswrapper[4703]: E0130 12:54:16.087747 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:54:28 crc kubenswrapper[4703]: I0130 12:54:28.210703 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs_96adb521-689d-4159-8cc3-73d1e75c182e/util/0.log" Jan 30 12:54:28 crc kubenswrapper[4703]: I0130 12:54:28.404314 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs_96adb521-689d-4159-8cc3-73d1e75c182e/pull/0.log" Jan 30 12:54:28 crc kubenswrapper[4703]: I0130 12:54:28.426756 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs_96adb521-689d-4159-8cc3-73d1e75c182e/util/0.log" Jan 30 12:54:28 crc kubenswrapper[4703]: I0130 12:54:28.465518 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs_96adb521-689d-4159-8cc3-73d1e75c182e/pull/0.log" Jan 30 12:54:28 crc kubenswrapper[4703]: I0130 12:54:28.645215 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs_96adb521-689d-4159-8cc3-73d1e75c182e/extract/0.log" Jan 30 12:54:28 crc kubenswrapper[4703]: I0130 12:54:28.688474 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs_96adb521-689d-4159-8cc3-73d1e75c182e/util/0.log" Jan 30 12:54:28 crc kubenswrapper[4703]: I0130 12:54:28.716312 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dct79hs_96adb521-689d-4159-8cc3-73d1e75c182e/pull/0.log" Jan 30 12:54:28 crc kubenswrapper[4703]: I0130 12:54:28.897427 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4_b14aa5da-c540-40d4-9f4c-8c00169e91b3/util/0.log" Jan 30 12:54:29 crc kubenswrapper[4703]: I0130 12:54:29.091737 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4_b14aa5da-c540-40d4-9f4c-8c00169e91b3/util/0.log" Jan 30 12:54:29 crc kubenswrapper[4703]: I0130 12:54:29.105686 4703 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4_b14aa5da-c540-40d4-9f4c-8c00169e91b3/pull/0.log" Jan 30 12:54:29 crc kubenswrapper[4703]: I0130 12:54:29.121345 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4_b14aa5da-c540-40d4-9f4c-8c00169e91b3/pull/0.log" Jan 30 12:54:29 crc kubenswrapper[4703]: I0130 12:54:29.424806 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4_b14aa5da-c540-40d4-9f4c-8c00169e91b3/util/0.log" Jan 30 12:54:29 crc kubenswrapper[4703]: I0130 12:54:29.474288 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4_b14aa5da-c540-40d4-9f4c-8c00169e91b3/extract/0.log" Jan 30 12:54:29 crc kubenswrapper[4703]: I0130 12:54:29.477466 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec7136hfj4_b14aa5da-c540-40d4-9f4c-8c00169e91b3/pull/0.log" Jan 30 12:54:29 crc kubenswrapper[4703]: I0130 12:54:29.625782 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x_278c6aa2-fe68-47d0-9626-1b8a42157a4c/util/0.log" Jan 30 12:54:29 crc kubenswrapper[4703]: I0130 12:54:29.850325 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x_278c6aa2-fe68-47d0-9626-1b8a42157a4c/util/0.log" Jan 30 12:54:29 crc kubenswrapper[4703]: I0130 12:54:29.866258 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x_278c6aa2-fe68-47d0-9626-1b8a42157a4c/pull/0.log" Jan 30 12:54:29 crc kubenswrapper[4703]: I0130 12:54:29.866449 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x_278c6aa2-fe68-47d0-9626-1b8a42157a4c/pull/0.log" Jan 30 12:54:30 crc kubenswrapper[4703]: I0130 12:54:30.070654 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x_278c6aa2-fe68-47d0-9626-1b8a42157a4c/pull/0.log" Jan 30 12:54:30 crc kubenswrapper[4703]: I0130 12:54:30.074185 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x_278c6aa2-fe68-47d0-9626-1b8a42157a4c/util/0.log" Jan 30 12:54:30 crc kubenswrapper[4703]: I0130 12:54:30.087049 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:54:30 crc kubenswrapper[4703]: E0130 12:54:30.087904 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:54:30 crc kubenswrapper[4703]: I0130 12:54:30.114826 4703 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08qt94x_278c6aa2-fe68-47d0-9626-1b8a42157a4c/extract/0.log" Jan 30 12:54:30 crc kubenswrapper[4703]: I0130 12:54:30.276258 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-q9b56_17ab8a97-9b84-4eac-be90-6d5283440ddb/extract-utilities/0.log" Jan 30 12:54:30 crc kubenswrapper[4703]: I0130 12:54:30.487516 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-q9b56_17ab8a97-9b84-4eac-be90-6d5283440ddb/extract-content/0.log" Jan 30 12:54:30 crc kubenswrapper[4703]: I0130 12:54:30.487558 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-q9b56_17ab8a97-9b84-4eac-be90-6d5283440ddb/extract-utilities/0.log" Jan 30 12:54:30 crc kubenswrapper[4703]: I0130 12:54:30.513057 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-q9b56_17ab8a97-9b84-4eac-be90-6d5283440ddb/extract-content/0.log" Jan 30 12:54:31 crc kubenswrapper[4703]: I0130 12:54:31.010884 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-q9b56_17ab8a97-9b84-4eac-be90-6d5283440ddb/extract-content/0.log" Jan 30 12:54:31 crc kubenswrapper[4703]: I0130 12:54:31.025015 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-q9b56_17ab8a97-9b84-4eac-be90-6d5283440ddb/extract-utilities/0.log" Jan 30 12:54:31 crc kubenswrapper[4703]: I0130 12:54:31.265314 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r7gjb_1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa/extract-utilities/0.log" Jan 30 12:54:31 crc kubenswrapper[4703]: I0130 12:54:31.486807 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-q9b56_17ab8a97-9b84-4eac-be90-6d5283440ddb/registry-server/0.log" Jan 30 12:54:31 crc kubenswrapper[4703]: I0130 12:54:31.582705 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r7gjb_1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa/extract-utilities/0.log" Jan 30 12:54:31 crc kubenswrapper[4703]: I0130 12:54:31.599846 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r7gjb_1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa/extract-content/0.log" Jan 30 12:54:31 crc kubenswrapper[4703]: I0130 12:54:31.610085 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r7gjb_1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa/extract-content/0.log" Jan 30 12:54:31 crc kubenswrapper[4703]: I0130 12:54:31.834155 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r7gjb_1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa/extract-utilities/0.log" Jan 30 12:54:31 crc kubenswrapper[4703]: I0130 12:54:31.888910 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r7gjb_1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa/extract-content/0.log" Jan 30 12:54:32 crc kubenswrapper[4703]: I0130 12:54:32.098593 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-2wfxz_f0dd4153-47cd-40a1-b929-69ecba1b33f4/marketplace-operator/0.log" Jan 30 12:54:32 crc kubenswrapper[4703]: I0130 12:54:32.231099 4703 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5v2j7_0786e743-c8c4-45c5-abbe-197d54d908f6/extract-utilities/0.log" Jan 30 12:54:32 crc kubenswrapper[4703]: I0130 12:54:32.468113 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-r7gjb_1d70ac4d-de5f-4d0f-84c3-eaf5545c42fa/registry-server/0.log" Jan 30 12:54:32 crc kubenswrapper[4703]: I0130 12:54:32.506991 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5v2j7_0786e743-c8c4-45c5-abbe-197d54d908f6/extract-utilities/0.log" Jan 30 12:54:32 crc kubenswrapper[4703]: I0130 12:54:32.539884 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5v2j7_0786e743-c8c4-45c5-abbe-197d54d908f6/extract-content/0.log" Jan 30 12:54:32 crc kubenswrapper[4703]: I0130 12:54:32.540044 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5v2j7_0786e743-c8c4-45c5-abbe-197d54d908f6/extract-content/0.log" Jan 30 12:54:32 crc kubenswrapper[4703]: I0130 12:54:32.807589 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5v2j7_0786e743-c8c4-45c5-abbe-197d54d908f6/extract-content/0.log" Jan 30 12:54:32 crc kubenswrapper[4703]: I0130 12:54:32.807630 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5v2j7_0786e743-c8c4-45c5-abbe-197d54d908f6/extract-utilities/0.log" Jan 30 12:54:32 crc kubenswrapper[4703]: I0130 12:54:32.854780 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ttcs9_69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed/extract-utilities/0.log" Jan 30 12:54:32 crc kubenswrapper[4703]: I0130 12:54:32.953805 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-5v2j7_0786e743-c8c4-45c5-abbe-197d54d908f6/registry-server/0.log" Jan 30 12:54:33 crc kubenswrapper[4703]: I0130 12:54:33.154707 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ttcs9_69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed/extract-content/0.log" Jan 30 12:54:33 crc kubenswrapper[4703]: I0130 12:54:33.166371 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ttcs9_69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed/extract-utilities/0.log" Jan 30 12:54:33 crc kubenswrapper[4703]: I0130 12:54:33.201910 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ttcs9_69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed/extract-content/0.log" Jan 30 12:54:33 crc kubenswrapper[4703]: I0130 12:54:33.392950 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ttcs9_69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed/extract-utilities/0.log" Jan 30 12:54:33 crc kubenswrapper[4703]: I0130 12:54:33.430095 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ttcs9_69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed/extract-content/0.log" Jan 30 12:54:33 crc kubenswrapper[4703]: I0130 12:54:33.890533 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-ttcs9_69ce5cb6-e908-4387-a9a6-0fe5e7ab44ed/registry-server/0.log" Jan 30 12:54:42 crc kubenswrapper[4703]: I0130 12:54:42.087532 4703 scope.go:117] "RemoveContainer" 
containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:54:42 crc kubenswrapper[4703]: E0130 12:54:42.089908 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:54:47 crc kubenswrapper[4703]: I0130 12:54:47.224449 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-2dr52_1bba418c-e14f-4710-b936-9431dee41382/prometheus-operator/0.log" Jan 30 12:54:47 crc kubenswrapper[4703]: I0130 12:54:47.241802 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6f4cc54497-v5wf4_f5d72830-ff3a-4578-9e66-c738cbf1a7cf/prometheus-operator-admission-webhook/0.log" Jan 30 12:54:47 crc kubenswrapper[4703]: I0130 12:54:47.244693 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-6f4cc54497-t5q8w_51d9f47f-4fa1-4632-be7a-f92fcb054ae6/prometheus-operator-admission-webhook/0.log" Jan 30 12:54:47 crc kubenswrapper[4703]: I0130 12:54:47.453610 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-skwqz_4bd5df43-64da-4ab9-8006-83056b6d0d5e/operator/0.log" Jan 30 12:54:47 crc kubenswrapper[4703]: I0130 12:54:47.472921 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-n9zw2_74ad7d1c-6e7a-4f7e-a441-e301ec3593ab/perses-operator/0.log" Jan 30 12:54:56 crc kubenswrapper[4703]: I0130 12:54:56.087574 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:54:56 crc kubenswrapper[4703]: E0130 12:54:56.088721 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:55:07 crc kubenswrapper[4703]: I0130 12:55:07.090614 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:55:07 crc kubenswrapper[4703]: E0130 12:55:07.093442 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:55:18 crc kubenswrapper[4703]: I0130 12:55:18.158589 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:55:18 crc kubenswrapper[4703]: E0130 12:55:18.161072 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler 
pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:55:22 crc kubenswrapper[4703]: I0130 12:55:22.008860 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8p69q"] Jan 30 12:55:22 crc kubenswrapper[4703]: E0130 12:55:22.010111 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d945360-fc62-47e9-91ce-6ec9cae3d87e" containerName="extract-content" Jan 30 12:55:22 crc kubenswrapper[4703]: I0130 12:55:22.010163 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d945360-fc62-47e9-91ce-6ec9cae3d87e" containerName="extract-content" Jan 30 12:55:22 crc kubenswrapper[4703]: E0130 12:55:22.010193 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d945360-fc62-47e9-91ce-6ec9cae3d87e" containerName="registry-server" Jan 30 12:55:22 crc kubenswrapper[4703]: I0130 12:55:22.010202 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d945360-fc62-47e9-91ce-6ec9cae3d87e" containerName="registry-server" Jan 30 12:55:22 crc kubenswrapper[4703]: E0130 12:55:22.010224 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d945360-fc62-47e9-91ce-6ec9cae3d87e" containerName="extract-utilities" Jan 30 12:55:22 crc kubenswrapper[4703]: I0130 12:55:22.010232 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d945360-fc62-47e9-91ce-6ec9cae3d87e" containerName="extract-utilities" Jan 30 12:55:22 crc kubenswrapper[4703]: I0130 12:55:22.010493 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d945360-fc62-47e9-91ce-6ec9cae3d87e" containerName="registry-server" Jan 30 12:55:22 crc kubenswrapper[4703]: I0130 12:55:22.013938 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8p69q" Jan 30 12:55:22 crc kubenswrapper[4703]: I0130 12:55:22.030211 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8p69q"] Jan 30 12:55:22 crc kubenswrapper[4703]: I0130 12:55:22.066231 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b74ff63-b994-4cba-a3e5-09696ff2ab2d-catalog-content\") pod \"redhat-marketplace-8p69q\" (UID: \"6b74ff63-b994-4cba-a3e5-09696ff2ab2d\") " pod="openshift-marketplace/redhat-marketplace-8p69q" Jan 30 12:55:22 crc kubenswrapper[4703]: I0130 12:55:22.066310 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bnwbj\" (UniqueName: \"kubernetes.io/projected/6b74ff63-b994-4cba-a3e5-09696ff2ab2d-kube-api-access-bnwbj\") pod \"redhat-marketplace-8p69q\" (UID: \"6b74ff63-b994-4cba-a3e5-09696ff2ab2d\") " pod="openshift-marketplace/redhat-marketplace-8p69q" Jan 30 12:55:22 crc kubenswrapper[4703]: I0130 12:55:22.066387 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b74ff63-b994-4cba-a3e5-09696ff2ab2d-utilities\") pod \"redhat-marketplace-8p69q\" (UID: \"6b74ff63-b994-4cba-a3e5-09696ff2ab2d\") " pod="openshift-marketplace/redhat-marketplace-8p69q" Jan 30 12:55:22 crc kubenswrapper[4703]: I0130 12:55:22.170973 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b74ff63-b994-4cba-a3e5-09696ff2ab2d-utilities\") pod \"redhat-marketplace-8p69q\" (UID: \"6b74ff63-b994-4cba-a3e5-09696ff2ab2d\") " pod="openshift-marketplace/redhat-marketplace-8p69q" Jan 30 12:55:22 crc kubenswrapper[4703]: I0130 12:55:22.171818 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b74ff63-b994-4cba-a3e5-09696ff2ab2d-utilities\") pod \"redhat-marketplace-8p69q\" (UID: \"6b74ff63-b994-4cba-a3e5-09696ff2ab2d\") " pod="openshift-marketplace/redhat-marketplace-8p69q" Jan 30 12:55:22 crc kubenswrapper[4703]: I0130 12:55:22.172291 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b74ff63-b994-4cba-a3e5-09696ff2ab2d-catalog-content\") pod \"redhat-marketplace-8p69q\" (UID: \"6b74ff63-b994-4cba-a3e5-09696ff2ab2d\") " pod="openshift-marketplace/redhat-marketplace-8p69q" Jan 30 12:55:22 crc kubenswrapper[4703]: I0130 12:55:22.172502 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bnwbj\" (UniqueName: \"kubernetes.io/projected/6b74ff63-b994-4cba-a3e5-09696ff2ab2d-kube-api-access-bnwbj\") pod \"redhat-marketplace-8p69q\" (UID: \"6b74ff63-b994-4cba-a3e5-09696ff2ab2d\") " pod="openshift-marketplace/redhat-marketplace-8p69q" Jan 30 12:55:22 crc kubenswrapper[4703]: I0130 12:55:22.172653 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b74ff63-b994-4cba-a3e5-09696ff2ab2d-catalog-content\") pod \"redhat-marketplace-8p69q\" (UID: \"6b74ff63-b994-4cba-a3e5-09696ff2ab2d\") " pod="openshift-marketplace/redhat-marketplace-8p69q" Jan 30 12:55:22 crc kubenswrapper[4703]: I0130 12:55:22.194302 4703 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-bnwbj\" (UniqueName: \"kubernetes.io/projected/6b74ff63-b994-4cba-a3e5-09696ff2ab2d-kube-api-access-bnwbj\") pod \"redhat-marketplace-8p69q\" (UID: \"6b74ff63-b994-4cba-a3e5-09696ff2ab2d\") " pod="openshift-marketplace/redhat-marketplace-8p69q" Jan 30 12:55:22 crc kubenswrapper[4703]: I0130 12:55:22.352227 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8p69q" Jan 30 12:55:22 crc kubenswrapper[4703]: I0130 12:55:22.921208 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8p69q"] Jan 30 12:55:23 crc kubenswrapper[4703]: I0130 12:55:23.954548 4703 generic.go:334] "Generic (PLEG): container finished" podID="6b74ff63-b994-4cba-a3e5-09696ff2ab2d" containerID="aa45e0aaca89db652d0b7465c7ca0d56f445d45735b3866b476cdb4b5bed1090" exitCode=0 Jan 30 12:55:23 crc kubenswrapper[4703]: I0130 12:55:23.954648 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8p69q" event={"ID":"6b74ff63-b994-4cba-a3e5-09696ff2ab2d","Type":"ContainerDied","Data":"aa45e0aaca89db652d0b7465c7ca0d56f445d45735b3866b476cdb4b5bed1090"} Jan 30 12:55:23 crc kubenswrapper[4703]: I0130 12:55:23.955239 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8p69q" event={"ID":"6b74ff63-b994-4cba-a3e5-09696ff2ab2d","Type":"ContainerStarted","Data":"e50bee8f2ca95dfd5ef8547037639988f4e5b5b236511ecccd7bb68e3cdeb733"} Jan 30 12:55:24 crc kubenswrapper[4703]: I0130 12:55:24.968812 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8p69q" event={"ID":"6b74ff63-b994-4cba-a3e5-09696ff2ab2d","Type":"ContainerStarted","Data":"69598711f265cf9fac4f5a8e7d97e9afa47f6f99e58b249051f7f8e9ec1f252d"} Jan 30 12:55:25 crc kubenswrapper[4703]: I0130 12:55:25.985348 4703 generic.go:334] "Generic (PLEG): container finished" podID="6b74ff63-b994-4cba-a3e5-09696ff2ab2d" containerID="69598711f265cf9fac4f5a8e7d97e9afa47f6f99e58b249051f7f8e9ec1f252d" exitCode=0 Jan 30 12:55:25 crc kubenswrapper[4703]: I0130 12:55:25.985470 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8p69q" event={"ID":"6b74ff63-b994-4cba-a3e5-09696ff2ab2d","Type":"ContainerDied","Data":"69598711f265cf9fac4f5a8e7d97e9afa47f6f99e58b249051f7f8e9ec1f252d"} Jan 30 12:55:27 crc kubenswrapper[4703]: I0130 12:55:26.999899 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8p69q" event={"ID":"6b74ff63-b994-4cba-a3e5-09696ff2ab2d","Type":"ContainerStarted","Data":"4abe1d43f380f5db6f8f7a5571b5b637a7f92c6a77c5fcbfd08a49efc6e467e0"} Jan 30 12:55:27 crc kubenswrapper[4703]: I0130 12:55:27.034346 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8p69q" podStartSLOduration=3.568237293 podStartE2EDuration="6.034315388s" podCreationTimestamp="2026-01-30 12:55:21 +0000 UTC" firstStartedPulling="2026-01-30 12:55:23.957014165 +0000 UTC m=+3559.734835809" lastFinishedPulling="2026-01-30 12:55:26.42309225 +0000 UTC m=+3562.200913904" observedRunningTime="2026-01-30 12:55:27.024819105 +0000 UTC m=+3562.802640759" watchObservedRunningTime="2026-01-30 12:55:27.034315388 +0000 UTC m=+3562.812137042" Jan 30 12:55:31 crc kubenswrapper[4703]: I0130 12:55:31.087190 4703 scope.go:117] "RemoveContainer" 
containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:55:31 crc kubenswrapper[4703]: E0130 12:55:31.088104 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:55:32 crc kubenswrapper[4703]: I0130 12:55:32.354277 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8p69q" Jan 30 12:55:32 crc kubenswrapper[4703]: I0130 12:55:32.354548 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8p69q" Jan 30 12:55:32 crc kubenswrapper[4703]: I0130 12:55:32.407227 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8p69q" Jan 30 12:55:33 crc kubenswrapper[4703]: I0130 12:55:33.124263 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8p69q" Jan 30 12:55:33 crc kubenswrapper[4703]: I0130 12:55:33.190189 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8p69q"] Jan 30 12:55:35 crc kubenswrapper[4703]: I0130 12:55:35.095465 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8p69q" podUID="6b74ff63-b994-4cba-a3e5-09696ff2ab2d" containerName="registry-server" containerID="cri-o://4abe1d43f380f5db6f8f7a5571b5b637a7f92c6a77c5fcbfd08a49efc6e467e0" gracePeriod=2 Jan 30 12:55:35 crc kubenswrapper[4703]: I0130 12:55:35.736641 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8p69q" Jan 30 12:55:35 crc kubenswrapper[4703]: I0130 12:55:35.843162 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bnwbj\" (UniqueName: \"kubernetes.io/projected/6b74ff63-b994-4cba-a3e5-09696ff2ab2d-kube-api-access-bnwbj\") pod \"6b74ff63-b994-4cba-a3e5-09696ff2ab2d\" (UID: \"6b74ff63-b994-4cba-a3e5-09696ff2ab2d\") " Jan 30 12:55:35 crc kubenswrapper[4703]: I0130 12:55:35.843343 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b74ff63-b994-4cba-a3e5-09696ff2ab2d-catalog-content\") pod \"6b74ff63-b994-4cba-a3e5-09696ff2ab2d\" (UID: \"6b74ff63-b994-4cba-a3e5-09696ff2ab2d\") " Jan 30 12:55:35 crc kubenswrapper[4703]: I0130 12:55:35.843546 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b74ff63-b994-4cba-a3e5-09696ff2ab2d-utilities\") pod \"6b74ff63-b994-4cba-a3e5-09696ff2ab2d\" (UID: \"6b74ff63-b994-4cba-a3e5-09696ff2ab2d\") " Jan 30 12:55:35 crc kubenswrapper[4703]: I0130 12:55:35.845445 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b74ff63-b994-4cba-a3e5-09696ff2ab2d-utilities" (OuterVolumeSpecName: "utilities") pod "6b74ff63-b994-4cba-a3e5-09696ff2ab2d" (UID: "6b74ff63-b994-4cba-a3e5-09696ff2ab2d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:55:35 crc kubenswrapper[4703]: I0130 12:55:35.851693 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b74ff63-b994-4cba-a3e5-09696ff2ab2d-kube-api-access-bnwbj" (OuterVolumeSpecName: "kube-api-access-bnwbj") pod "6b74ff63-b994-4cba-a3e5-09696ff2ab2d" (UID: "6b74ff63-b994-4cba-a3e5-09696ff2ab2d"). InnerVolumeSpecName "kube-api-access-bnwbj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:55:35 crc kubenswrapper[4703]: I0130 12:55:35.900460 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b74ff63-b994-4cba-a3e5-09696ff2ab2d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6b74ff63-b994-4cba-a3e5-09696ff2ab2d" (UID: "6b74ff63-b994-4cba-a3e5-09696ff2ab2d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:55:35 crc kubenswrapper[4703]: I0130 12:55:35.947305 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bnwbj\" (UniqueName: \"kubernetes.io/projected/6b74ff63-b994-4cba-a3e5-09696ff2ab2d-kube-api-access-bnwbj\") on node \"crc\" DevicePath \"\"" Jan 30 12:55:35 crc kubenswrapper[4703]: I0130 12:55:35.947363 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6b74ff63-b994-4cba-a3e5-09696ff2ab2d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:55:35 crc kubenswrapper[4703]: I0130 12:55:35.947376 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6b74ff63-b994-4cba-a3e5-09696ff2ab2d-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:55:36 crc kubenswrapper[4703]: I0130 12:55:36.105481 4703 generic.go:334] "Generic (PLEG): container finished" podID="6b74ff63-b994-4cba-a3e5-09696ff2ab2d" containerID="4abe1d43f380f5db6f8f7a5571b5b637a7f92c6a77c5fcbfd08a49efc6e467e0" exitCode=0 Jan 30 12:55:36 crc kubenswrapper[4703]: I0130 12:55:36.105551 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8p69q" event={"ID":"6b74ff63-b994-4cba-a3e5-09696ff2ab2d","Type":"ContainerDied","Data":"4abe1d43f380f5db6f8f7a5571b5b637a7f92c6a77c5fcbfd08a49efc6e467e0"} Jan 30 12:55:36 crc kubenswrapper[4703]: I0130 12:55:36.105600 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8p69q" event={"ID":"6b74ff63-b994-4cba-a3e5-09696ff2ab2d","Type":"ContainerDied","Data":"e50bee8f2ca95dfd5ef8547037639988f4e5b5b236511ecccd7bb68e3cdeb733"} Jan 30 12:55:36 crc kubenswrapper[4703]: I0130 12:55:36.105617 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8p69q" Jan 30 12:55:36 crc kubenswrapper[4703]: I0130 12:55:36.105637 4703 scope.go:117] "RemoveContainer" containerID="4abe1d43f380f5db6f8f7a5571b5b637a7f92c6a77c5fcbfd08a49efc6e467e0" Jan 30 12:55:36 crc kubenswrapper[4703]: I0130 12:55:36.149446 4703 scope.go:117] "RemoveContainer" containerID="69598711f265cf9fac4f5a8e7d97e9afa47f6f99e58b249051f7f8e9ec1f252d" Jan 30 12:55:36 crc kubenswrapper[4703]: I0130 12:55:36.151143 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8p69q"] Jan 30 12:55:36 crc kubenswrapper[4703]: I0130 12:55:36.173407 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8p69q"] Jan 30 12:55:36 crc kubenswrapper[4703]: I0130 12:55:36.193311 4703 scope.go:117] "RemoveContainer" containerID="aa45e0aaca89db652d0b7465c7ca0d56f445d45735b3866b476cdb4b5bed1090" Jan 30 12:55:36 crc kubenswrapper[4703]: I0130 12:55:36.231906 4703 scope.go:117] "RemoveContainer" containerID="4abe1d43f380f5db6f8f7a5571b5b637a7f92c6a77c5fcbfd08a49efc6e467e0" Jan 30 12:55:36 crc kubenswrapper[4703]: E0130 12:55:36.232675 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4abe1d43f380f5db6f8f7a5571b5b637a7f92c6a77c5fcbfd08a49efc6e467e0\": container with ID starting with 4abe1d43f380f5db6f8f7a5571b5b637a7f92c6a77c5fcbfd08a49efc6e467e0 not found: ID does not exist" containerID="4abe1d43f380f5db6f8f7a5571b5b637a7f92c6a77c5fcbfd08a49efc6e467e0" Jan 30 12:55:36 crc kubenswrapper[4703]: I0130 12:55:36.232728 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4abe1d43f380f5db6f8f7a5571b5b637a7f92c6a77c5fcbfd08a49efc6e467e0"} err="failed to get container status \"4abe1d43f380f5db6f8f7a5571b5b637a7f92c6a77c5fcbfd08a49efc6e467e0\": rpc error: code = NotFound desc = could not find container \"4abe1d43f380f5db6f8f7a5571b5b637a7f92c6a77c5fcbfd08a49efc6e467e0\": container with ID starting with 4abe1d43f380f5db6f8f7a5571b5b637a7f92c6a77c5fcbfd08a49efc6e467e0 not found: ID does not exist" Jan 30 12:55:36 crc kubenswrapper[4703]: I0130 12:55:36.232773 4703 scope.go:117] "RemoveContainer" containerID="69598711f265cf9fac4f5a8e7d97e9afa47f6f99e58b249051f7f8e9ec1f252d" Jan 30 12:55:36 crc kubenswrapper[4703]: E0130 12:55:36.233697 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69598711f265cf9fac4f5a8e7d97e9afa47f6f99e58b249051f7f8e9ec1f252d\": container with ID starting with 69598711f265cf9fac4f5a8e7d97e9afa47f6f99e58b249051f7f8e9ec1f252d not found: ID does not exist" containerID="69598711f265cf9fac4f5a8e7d97e9afa47f6f99e58b249051f7f8e9ec1f252d" Jan 30 12:55:36 crc kubenswrapper[4703]: I0130 12:55:36.233747 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69598711f265cf9fac4f5a8e7d97e9afa47f6f99e58b249051f7f8e9ec1f252d"} err="failed to get container status \"69598711f265cf9fac4f5a8e7d97e9afa47f6f99e58b249051f7f8e9ec1f252d\": rpc error: code = NotFound desc = could not find container \"69598711f265cf9fac4f5a8e7d97e9afa47f6f99e58b249051f7f8e9ec1f252d\": container with ID starting with 69598711f265cf9fac4f5a8e7d97e9afa47f6f99e58b249051f7f8e9ec1f252d not found: ID does not exist" Jan 30 12:55:36 crc kubenswrapper[4703]: I0130 12:55:36.233780 4703 scope.go:117] "RemoveContainer" 
containerID="aa45e0aaca89db652d0b7465c7ca0d56f445d45735b3866b476cdb4b5bed1090" Jan 30 12:55:36 crc kubenswrapper[4703]: E0130 12:55:36.234655 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa45e0aaca89db652d0b7465c7ca0d56f445d45735b3866b476cdb4b5bed1090\": container with ID starting with aa45e0aaca89db652d0b7465c7ca0d56f445d45735b3866b476cdb4b5bed1090 not found: ID does not exist" containerID="aa45e0aaca89db652d0b7465c7ca0d56f445d45735b3866b476cdb4b5bed1090" Jan 30 12:55:36 crc kubenswrapper[4703]: I0130 12:55:36.234685 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa45e0aaca89db652d0b7465c7ca0d56f445d45735b3866b476cdb4b5bed1090"} err="failed to get container status \"aa45e0aaca89db652d0b7465c7ca0d56f445d45735b3866b476cdb4b5bed1090\": rpc error: code = NotFound desc = could not find container \"aa45e0aaca89db652d0b7465c7ca0d56f445d45735b3866b476cdb4b5bed1090\": container with ID starting with aa45e0aaca89db652d0b7465c7ca0d56f445d45735b3866b476cdb4b5bed1090 not found: ID does not exist" Jan 30 12:55:37 crc kubenswrapper[4703]: I0130 12:55:37.099999 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b74ff63-b994-4cba-a3e5-09696ff2ab2d" path="/var/lib/kubelet/pods/6b74ff63-b994-4cba-a3e5-09696ff2ab2d/volumes" Jan 30 12:55:45 crc kubenswrapper[4703]: I0130 12:55:45.104912 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:55:45 crc kubenswrapper[4703]: E0130 12:55:45.105791 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:55:51 crc kubenswrapper[4703]: I0130 12:55:51.477997 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-tck26"] Jan 30 12:55:51 crc kubenswrapper[4703]: E0130 12:55:51.479196 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b74ff63-b994-4cba-a3e5-09696ff2ab2d" containerName="extract-content" Jan 30 12:55:51 crc kubenswrapper[4703]: I0130 12:55:51.479210 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b74ff63-b994-4cba-a3e5-09696ff2ab2d" containerName="extract-content" Jan 30 12:55:51 crc kubenswrapper[4703]: E0130 12:55:51.479258 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b74ff63-b994-4cba-a3e5-09696ff2ab2d" containerName="extract-utilities" Jan 30 12:55:51 crc kubenswrapper[4703]: I0130 12:55:51.479267 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b74ff63-b994-4cba-a3e5-09696ff2ab2d" containerName="extract-utilities" Jan 30 12:55:51 crc kubenswrapper[4703]: E0130 12:55:51.479281 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b74ff63-b994-4cba-a3e5-09696ff2ab2d" containerName="registry-server" Jan 30 12:55:51 crc kubenswrapper[4703]: I0130 12:55:51.479319 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b74ff63-b994-4cba-a3e5-09696ff2ab2d" containerName="registry-server" Jan 30 12:55:51 crc kubenswrapper[4703]: I0130 12:55:51.479573 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b74ff63-b994-4cba-a3e5-09696ff2ab2d" 
containerName="registry-server" Jan 30 12:55:51 crc kubenswrapper[4703]: I0130 12:55:51.481316 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tck26" Jan 30 12:55:51 crc kubenswrapper[4703]: I0130 12:55:51.492842 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tck26"] Jan 30 12:55:51 crc kubenswrapper[4703]: I0130 12:55:51.645220 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5eff89da-8248-424c-8639-dde265a5ce6a-utilities\") pod \"certified-operators-tck26\" (UID: \"5eff89da-8248-424c-8639-dde265a5ce6a\") " pod="openshift-marketplace/certified-operators-tck26" Jan 30 12:55:51 crc kubenswrapper[4703]: I0130 12:55:51.645799 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dc6fb\" (UniqueName: \"kubernetes.io/projected/5eff89da-8248-424c-8639-dde265a5ce6a-kube-api-access-dc6fb\") pod \"certified-operators-tck26\" (UID: \"5eff89da-8248-424c-8639-dde265a5ce6a\") " pod="openshift-marketplace/certified-operators-tck26" Jan 30 12:55:51 crc kubenswrapper[4703]: I0130 12:55:51.645915 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5eff89da-8248-424c-8639-dde265a5ce6a-catalog-content\") pod \"certified-operators-tck26\" (UID: \"5eff89da-8248-424c-8639-dde265a5ce6a\") " pod="openshift-marketplace/certified-operators-tck26" Jan 30 12:55:51 crc kubenswrapper[4703]: I0130 12:55:51.748181 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5eff89da-8248-424c-8639-dde265a5ce6a-utilities\") pod \"certified-operators-tck26\" (UID: \"5eff89da-8248-424c-8639-dde265a5ce6a\") " pod="openshift-marketplace/certified-operators-tck26" Jan 30 12:55:51 crc kubenswrapper[4703]: I0130 12:55:51.748282 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dc6fb\" (UniqueName: \"kubernetes.io/projected/5eff89da-8248-424c-8639-dde265a5ce6a-kube-api-access-dc6fb\") pod \"certified-operators-tck26\" (UID: \"5eff89da-8248-424c-8639-dde265a5ce6a\") " pod="openshift-marketplace/certified-operators-tck26" Jan 30 12:55:51 crc kubenswrapper[4703]: I0130 12:55:51.748348 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5eff89da-8248-424c-8639-dde265a5ce6a-catalog-content\") pod \"certified-operators-tck26\" (UID: \"5eff89da-8248-424c-8639-dde265a5ce6a\") " pod="openshift-marketplace/certified-operators-tck26" Jan 30 12:55:51 crc kubenswrapper[4703]: I0130 12:55:51.749000 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5eff89da-8248-424c-8639-dde265a5ce6a-catalog-content\") pod \"certified-operators-tck26\" (UID: \"5eff89da-8248-424c-8639-dde265a5ce6a\") " pod="openshift-marketplace/certified-operators-tck26" Jan 30 12:55:51 crc kubenswrapper[4703]: I0130 12:55:51.749311 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5eff89da-8248-424c-8639-dde265a5ce6a-utilities\") pod \"certified-operators-tck26\" (UID: \"5eff89da-8248-424c-8639-dde265a5ce6a\") " 
pod="openshift-marketplace/certified-operators-tck26" Jan 30 12:55:51 crc kubenswrapper[4703]: I0130 12:55:51.794681 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dc6fb\" (UniqueName: \"kubernetes.io/projected/5eff89da-8248-424c-8639-dde265a5ce6a-kube-api-access-dc6fb\") pod \"certified-operators-tck26\" (UID: \"5eff89da-8248-424c-8639-dde265a5ce6a\") " pod="openshift-marketplace/certified-operators-tck26" Jan 30 12:55:51 crc kubenswrapper[4703]: I0130 12:55:51.832550 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tck26" Jan 30 12:55:52 crc kubenswrapper[4703]: I0130 12:55:52.481238 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tck26"] Jan 30 12:55:53 crc kubenswrapper[4703]: I0130 12:55:53.313419 4703 generic.go:334] "Generic (PLEG): container finished" podID="5eff89da-8248-424c-8639-dde265a5ce6a" containerID="19338213e1f7bf84b4513064d3d2c56116cc9cbeedd8c1d869fc833ccbb5f2dd" exitCode=0 Jan 30 12:55:53 crc kubenswrapper[4703]: I0130 12:55:53.316035 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tck26" event={"ID":"5eff89da-8248-424c-8639-dde265a5ce6a","Type":"ContainerDied","Data":"19338213e1f7bf84b4513064d3d2c56116cc9cbeedd8c1d869fc833ccbb5f2dd"} Jan 30 12:55:53 crc kubenswrapper[4703]: I0130 12:55:53.316076 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tck26" event={"ID":"5eff89da-8248-424c-8639-dde265a5ce6a","Type":"ContainerStarted","Data":"570053d970118632f093988319431de37ef12bba8c6afd2576b8b8b4b05922b3"} Jan 30 12:55:55 crc kubenswrapper[4703]: I0130 12:55:55.343191 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tck26" event={"ID":"5eff89da-8248-424c-8639-dde265a5ce6a","Type":"ContainerStarted","Data":"71c02dc43ff75a6868b548778247eaff43c85753fe31298910bf8a477c09697e"} Jan 30 12:55:56 crc kubenswrapper[4703]: I0130 12:55:56.086672 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:55:56 crc kubenswrapper[4703]: E0130 12:55:56.087573 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:55:56 crc kubenswrapper[4703]: I0130 12:55:56.359292 4703 generic.go:334] "Generic (PLEG): container finished" podID="5eff89da-8248-424c-8639-dde265a5ce6a" containerID="71c02dc43ff75a6868b548778247eaff43c85753fe31298910bf8a477c09697e" exitCode=0 Jan 30 12:55:56 crc kubenswrapper[4703]: I0130 12:55:56.359358 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tck26" event={"ID":"5eff89da-8248-424c-8639-dde265a5ce6a","Type":"ContainerDied","Data":"71c02dc43ff75a6868b548778247eaff43c85753fe31298910bf8a477c09697e"} Jan 30 12:55:57 crc kubenswrapper[4703]: I0130 12:55:57.371738 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tck26" event={"ID":"5eff89da-8248-424c-8639-dde265a5ce6a","Type":"ContainerStarted","Data":"c6e0d46497f7f730724eeb61c4b438a7b5a1c2fa3d12b2ffa2ba2f784b4bfe56"} 
Jan 30 12:55:57 crc kubenswrapper[4703]: I0130 12:55:57.399871 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tck26" podStartSLOduration=2.939942781 podStartE2EDuration="6.399824197s" podCreationTimestamp="2026-01-30 12:55:51 +0000 UTC" firstStartedPulling="2026-01-30 12:55:53.317985888 +0000 UTC m=+3589.095807542" lastFinishedPulling="2026-01-30 12:55:56.777867304 +0000 UTC m=+3592.555688958" observedRunningTime="2026-01-30 12:55:57.391631369 +0000 UTC m=+3593.169453033" watchObservedRunningTime="2026-01-30 12:55:57.399824197 +0000 UTC m=+3593.177645851" Jan 30 12:56:01 crc kubenswrapper[4703]: I0130 12:56:01.827836 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tck26" Jan 30 12:56:01 crc kubenswrapper[4703]: I0130 12:56:01.833666 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tck26" Jan 30 12:56:01 crc kubenswrapper[4703]: I0130 12:56:01.879638 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tck26" Jan 30 12:56:02 crc kubenswrapper[4703]: I0130 12:56:02.483811 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tck26" Jan 30 12:56:02 crc kubenswrapper[4703]: I0130 12:56:02.542440 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tck26"] Jan 30 12:56:04 crc kubenswrapper[4703]: I0130 12:56:04.448852 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tck26" podUID="5eff89da-8248-424c-8639-dde265a5ce6a" containerName="registry-server" containerID="cri-o://c6e0d46497f7f730724eeb61c4b438a7b5a1c2fa3d12b2ffa2ba2f784b4bfe56" gracePeriod=2 Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.076804 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tck26" Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.248278 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5eff89da-8248-424c-8639-dde265a5ce6a-utilities\") pod \"5eff89da-8248-424c-8639-dde265a5ce6a\" (UID: \"5eff89da-8248-424c-8639-dde265a5ce6a\") " Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.249286 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dc6fb\" (UniqueName: \"kubernetes.io/projected/5eff89da-8248-424c-8639-dde265a5ce6a-kube-api-access-dc6fb\") pod \"5eff89da-8248-424c-8639-dde265a5ce6a\" (UID: \"5eff89da-8248-424c-8639-dde265a5ce6a\") " Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.249354 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5eff89da-8248-424c-8639-dde265a5ce6a-catalog-content\") pod \"5eff89da-8248-424c-8639-dde265a5ce6a\" (UID: \"5eff89da-8248-424c-8639-dde265a5ce6a\") " Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.249460 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5eff89da-8248-424c-8639-dde265a5ce6a-utilities" (OuterVolumeSpecName: "utilities") pod "5eff89da-8248-424c-8639-dde265a5ce6a" (UID: "5eff89da-8248-424c-8639-dde265a5ce6a"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.250200 4703 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5eff89da-8248-424c-8639-dde265a5ce6a-utilities\") on node \"crc\" DevicePath \"\"" Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.256712 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5eff89da-8248-424c-8639-dde265a5ce6a-kube-api-access-dc6fb" (OuterVolumeSpecName: "kube-api-access-dc6fb") pod "5eff89da-8248-424c-8639-dde265a5ce6a" (UID: "5eff89da-8248-424c-8639-dde265a5ce6a"). InnerVolumeSpecName "kube-api-access-dc6fb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.352686 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dc6fb\" (UniqueName: \"kubernetes.io/projected/5eff89da-8248-424c-8639-dde265a5ce6a-kube-api-access-dc6fb\") on node \"crc\" DevicePath \"\"" Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.463156 4703 generic.go:334] "Generic (PLEG): container finished" podID="5eff89da-8248-424c-8639-dde265a5ce6a" containerID="c6e0d46497f7f730724eeb61c4b438a7b5a1c2fa3d12b2ffa2ba2f784b4bfe56" exitCode=0 Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.463214 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tck26" event={"ID":"5eff89da-8248-424c-8639-dde265a5ce6a","Type":"ContainerDied","Data":"c6e0d46497f7f730724eeb61c4b438a7b5a1c2fa3d12b2ffa2ba2f784b4bfe56"} Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.463279 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tck26" event={"ID":"5eff89da-8248-424c-8639-dde265a5ce6a","Type":"ContainerDied","Data":"570053d970118632f093988319431de37ef12bba8c6afd2576b8b8b4b05922b3"} Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.463290 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tck26" Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.463301 4703 scope.go:117] "RemoveContainer" containerID="c6e0d46497f7f730724eeb61c4b438a7b5a1c2fa3d12b2ffa2ba2f784b4bfe56" Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.493995 4703 scope.go:117] "RemoveContainer" containerID="71c02dc43ff75a6868b548778247eaff43c85753fe31298910bf8a477c09697e" Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.526784 4703 scope.go:117] "RemoveContainer" containerID="19338213e1f7bf84b4513064d3d2c56116cc9cbeedd8c1d869fc833ccbb5f2dd" Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.612699 4703 scope.go:117] "RemoveContainer" containerID="c6e0d46497f7f730724eeb61c4b438a7b5a1c2fa3d12b2ffa2ba2f784b4bfe56" Jan 30 12:56:05 crc kubenswrapper[4703]: E0130 12:56:05.613438 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6e0d46497f7f730724eeb61c4b438a7b5a1c2fa3d12b2ffa2ba2f784b4bfe56\": container with ID starting with c6e0d46497f7f730724eeb61c4b438a7b5a1c2fa3d12b2ffa2ba2f784b4bfe56 not found: ID does not exist" containerID="c6e0d46497f7f730724eeb61c4b438a7b5a1c2fa3d12b2ffa2ba2f784b4bfe56" Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.613508 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6e0d46497f7f730724eeb61c4b438a7b5a1c2fa3d12b2ffa2ba2f784b4bfe56"} err="failed to get container status \"c6e0d46497f7f730724eeb61c4b438a7b5a1c2fa3d12b2ffa2ba2f784b4bfe56\": rpc error: code = NotFound desc = could not find container \"c6e0d46497f7f730724eeb61c4b438a7b5a1c2fa3d12b2ffa2ba2f784b4bfe56\": container with ID starting with c6e0d46497f7f730724eeb61c4b438a7b5a1c2fa3d12b2ffa2ba2f784b4bfe56 not found: ID does not exist" Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.613549 4703 scope.go:117] "RemoveContainer" containerID="71c02dc43ff75a6868b548778247eaff43c85753fe31298910bf8a477c09697e" Jan 30 12:56:05 crc kubenswrapper[4703]: E0130 12:56:05.613940 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71c02dc43ff75a6868b548778247eaff43c85753fe31298910bf8a477c09697e\": container with ID starting with 71c02dc43ff75a6868b548778247eaff43c85753fe31298910bf8a477c09697e not found: ID does not exist" containerID="71c02dc43ff75a6868b548778247eaff43c85753fe31298910bf8a477c09697e" Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.613986 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71c02dc43ff75a6868b548778247eaff43c85753fe31298910bf8a477c09697e"} err="failed to get container status \"71c02dc43ff75a6868b548778247eaff43c85753fe31298910bf8a477c09697e\": rpc error: code = NotFound desc = could not find container \"71c02dc43ff75a6868b548778247eaff43c85753fe31298910bf8a477c09697e\": container with ID starting with 71c02dc43ff75a6868b548778247eaff43c85753fe31298910bf8a477c09697e not found: ID does not exist" Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.614015 4703 scope.go:117] "RemoveContainer" containerID="19338213e1f7bf84b4513064d3d2c56116cc9cbeedd8c1d869fc833ccbb5f2dd" Jan 30 12:56:05 crc kubenswrapper[4703]: E0130 12:56:05.614370 4703 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19338213e1f7bf84b4513064d3d2c56116cc9cbeedd8c1d869fc833ccbb5f2dd\": container with ID starting 
with 19338213e1f7bf84b4513064d3d2c56116cc9cbeedd8c1d869fc833ccbb5f2dd not found: ID does not exist" containerID="19338213e1f7bf84b4513064d3d2c56116cc9cbeedd8c1d869fc833ccbb5f2dd" Jan 30 12:56:05 crc kubenswrapper[4703]: I0130 12:56:05.614398 4703 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19338213e1f7bf84b4513064d3d2c56116cc9cbeedd8c1d869fc833ccbb5f2dd"} err="failed to get container status \"19338213e1f7bf84b4513064d3d2c56116cc9cbeedd8c1d869fc833ccbb5f2dd\": rpc error: code = NotFound desc = could not find container \"19338213e1f7bf84b4513064d3d2c56116cc9cbeedd8c1d869fc833ccbb5f2dd\": container with ID starting with 19338213e1f7bf84b4513064d3d2c56116cc9cbeedd8c1d869fc833ccbb5f2dd not found: ID does not exist" Jan 30 12:56:06 crc kubenswrapper[4703]: I0130 12:56:06.007364 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5eff89da-8248-424c-8639-dde265a5ce6a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5eff89da-8248-424c-8639-dde265a5ce6a" (UID: "5eff89da-8248-424c-8639-dde265a5ce6a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:56:06 crc kubenswrapper[4703]: I0130 12:56:06.077599 4703 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5eff89da-8248-424c-8639-dde265a5ce6a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 30 12:56:06 crc kubenswrapper[4703]: I0130 12:56:06.118537 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tck26"] Jan 30 12:56:06 crc kubenswrapper[4703]: I0130 12:56:06.131267 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tck26"] Jan 30 12:56:07 crc kubenswrapper[4703]: I0130 12:56:07.100313 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5eff89da-8248-424c-8639-dde265a5ce6a" path="/var/lib/kubelet/pods/5eff89da-8248-424c-8639-dde265a5ce6a/volumes" Jan 30 12:56:11 crc kubenswrapper[4703]: I0130 12:56:11.088742 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:56:11 crc kubenswrapper[4703]: E0130 12:56:11.089723 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:56:12 crc kubenswrapper[4703]: I0130 12:56:12.823479 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:56:12 crc kubenswrapper[4703]: I0130 12:56:12.823958 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:56:26 crc kubenswrapper[4703]: I0130 12:56:26.086854 4703 scope.go:117] "RemoveContainer" 
containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:56:26 crc kubenswrapper[4703]: I0130 12:56:26.723923 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerStarted","Data":"9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862"} Jan 30 12:56:30 crc kubenswrapper[4703]: I0130 12:56:30.798002 4703 generic.go:334] "Generic (PLEG): container finished" podID="2fc19a6b-3cde-4bb5-9499-f5be846289da" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" exitCode=1 Jan 30 12:56:30 crc kubenswrapper[4703]: I0130 12:56:30.798616 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"2fc19a6b-3cde-4bb5-9499-f5be846289da","Type":"ContainerDied","Data":"9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862"} Jan 30 12:56:30 crc kubenswrapper[4703]: I0130 12:56:30.798668 4703 scope.go:117] "RemoveContainer" containerID="11484f30526e06dba0f03408cf18a62a8814f95b1f09b1f954011117e5fbdcc2" Jan 30 12:56:30 crc kubenswrapper[4703]: I0130 12:56:30.799633 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:56:30 crc kubenswrapper[4703]: E0130 12:56:30.799911 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:56:31 crc kubenswrapper[4703]: I0130 12:56:31.034978 4703 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 30 12:56:31 crc kubenswrapper[4703]: I0130 12:56:31.817727 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:56:31 crc kubenswrapper[4703]: E0130 12:56:31.818318 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:56:36 crc kubenswrapper[4703]: I0130 12:56:36.034880 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:56:36 crc kubenswrapper[4703]: I0130 12:56:36.035699 4703 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:56:36 crc kubenswrapper[4703]: I0130 12:56:36.035715 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 30 12:56:36 crc kubenswrapper[4703]: I0130 12:56:36.036887 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:56:36 crc kubenswrapper[4703]: E0130 12:56:36.037332 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" 
pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:56:42 crc kubenswrapper[4703]: I0130 12:56:42.823220 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:56:42 crc kubenswrapper[4703]: I0130 12:56:42.824161 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:56:47 crc kubenswrapper[4703]: I0130 12:56:47.087150 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:56:47 crc kubenswrapper[4703]: E0130 12:56:47.088035 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:56:59 crc kubenswrapper[4703]: I0130 12:56:59.186546 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-z4rlc/must-gather-gdfpw" event={"ID":"7c916403-f0bc-47c8-bd85-d9977a5d9fa8","Type":"ContainerDied","Data":"ddbbd288d8c1469e5ffa8b08baee0389538f728dff5093cbd5f4c145fb79e106"} Jan 30 12:56:59 crc kubenswrapper[4703]: I0130 12:56:59.186446 4703 generic.go:334] "Generic (PLEG): container finished" podID="7c916403-f0bc-47c8-bd85-d9977a5d9fa8" containerID="ddbbd288d8c1469e5ffa8b08baee0389538f728dff5093cbd5f4c145fb79e106" exitCode=0 Jan 30 12:56:59 crc kubenswrapper[4703]: I0130 12:56:59.187827 4703 scope.go:117] "RemoveContainer" containerID="ddbbd288d8c1469e5ffa8b08baee0389538f728dff5093cbd5f4c145fb79e106" Jan 30 12:56:59 crc kubenswrapper[4703]: I0130 12:56:59.484515 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-z4rlc_must-gather-gdfpw_7c916403-f0bc-47c8-bd85-d9977a5d9fa8/gather/0.log" Jan 30 12:57:00 crc kubenswrapper[4703]: I0130 12:57:00.087758 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:57:00 crc kubenswrapper[4703]: E0130 12:57:00.088030 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:57:08 crc kubenswrapper[4703]: I0130 12:57:08.975971 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-z4rlc/must-gather-gdfpw"] Jan 30 12:57:08 crc kubenswrapper[4703]: I0130 12:57:08.976867 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-z4rlc/must-gather-gdfpw" podUID="7c916403-f0bc-47c8-bd85-d9977a5d9fa8" containerName="copy" containerID="cri-o://48d417a3c7be1853b19914b7bb163ca2d115fedcee04898512dba6c339fe88b5" gracePeriod=2 Jan 
30 12:57:08 crc kubenswrapper[4703]: I0130 12:57:08.987848 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-z4rlc/must-gather-gdfpw"] Jan 30 12:57:09 crc kubenswrapper[4703]: I0130 12:57:09.313926 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-z4rlc_must-gather-gdfpw_7c916403-f0bc-47c8-bd85-d9977a5d9fa8/copy/0.log" Jan 30 12:57:09 crc kubenswrapper[4703]: I0130 12:57:09.314511 4703 generic.go:334] "Generic (PLEG): container finished" podID="7c916403-f0bc-47c8-bd85-d9977a5d9fa8" containerID="48d417a3c7be1853b19914b7bb163ca2d115fedcee04898512dba6c339fe88b5" exitCode=143 Jan 30 12:57:10 crc kubenswrapper[4703]: I0130 12:57:10.032343 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-z4rlc_must-gather-gdfpw_7c916403-f0bc-47c8-bd85-d9977a5d9fa8/copy/0.log" Jan 30 12:57:10 crc kubenswrapper[4703]: I0130 12:57:10.033218 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-z4rlc/must-gather-gdfpw" Jan 30 12:57:10 crc kubenswrapper[4703]: I0130 12:57:10.102661 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-458g8\" (UniqueName: \"kubernetes.io/projected/7c916403-f0bc-47c8-bd85-d9977a5d9fa8-kube-api-access-458g8\") pod \"7c916403-f0bc-47c8-bd85-d9977a5d9fa8\" (UID: \"7c916403-f0bc-47c8-bd85-d9977a5d9fa8\") " Jan 30 12:57:10 crc kubenswrapper[4703]: I0130 12:57:10.102841 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/7c916403-f0bc-47c8-bd85-d9977a5d9fa8-must-gather-output\") pod \"7c916403-f0bc-47c8-bd85-d9977a5d9fa8\" (UID: \"7c916403-f0bc-47c8-bd85-d9977a5d9fa8\") " Jan 30 12:57:10 crc kubenswrapper[4703]: I0130 12:57:10.114503 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c916403-f0bc-47c8-bd85-d9977a5d9fa8-kube-api-access-458g8" (OuterVolumeSpecName: "kube-api-access-458g8") pod "7c916403-f0bc-47c8-bd85-d9977a5d9fa8" (UID: "7c916403-f0bc-47c8-bd85-d9977a5d9fa8"). InnerVolumeSpecName "kube-api-access-458g8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 12:57:10 crc kubenswrapper[4703]: I0130 12:57:10.206081 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-458g8\" (UniqueName: \"kubernetes.io/projected/7c916403-f0bc-47c8-bd85-d9977a5d9fa8-kube-api-access-458g8\") on node \"crc\" DevicePath \"\"" Jan 30 12:57:10 crc kubenswrapper[4703]: I0130 12:57:10.337868 4703 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-z4rlc_must-gather-gdfpw_7c916403-f0bc-47c8-bd85-d9977a5d9fa8/copy/0.log" Jan 30 12:57:10 crc kubenswrapper[4703]: I0130 12:57:10.338588 4703 scope.go:117] "RemoveContainer" containerID="48d417a3c7be1853b19914b7bb163ca2d115fedcee04898512dba6c339fe88b5" Jan 30 12:57:10 crc kubenswrapper[4703]: I0130 12:57:10.338865 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-z4rlc/must-gather-gdfpw" Jan 30 12:57:10 crc kubenswrapper[4703]: I0130 12:57:10.372287 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c916403-f0bc-47c8-bd85-d9977a5d9fa8-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "7c916403-f0bc-47c8-bd85-d9977a5d9fa8" (UID: "7c916403-f0bc-47c8-bd85-d9977a5d9fa8"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 30 12:57:10 crc kubenswrapper[4703]: I0130 12:57:10.382524 4703 scope.go:117] "RemoveContainer" containerID="ddbbd288d8c1469e5ffa8b08baee0389538f728dff5093cbd5f4c145fb79e106" Jan 30 12:57:10 crc kubenswrapper[4703]: I0130 12:57:10.414350 4703 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/7c916403-f0bc-47c8-bd85-d9977a5d9fa8-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 30 12:57:11 crc kubenswrapper[4703]: I0130 12:57:11.086894 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:57:11 crc kubenswrapper[4703]: E0130 12:57:11.087964 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:57:11 crc kubenswrapper[4703]: I0130 12:57:11.101562 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c916403-f0bc-47c8-bd85-d9977a5d9fa8" path="/var/lib/kubelet/pods/7c916403-f0bc-47c8-bd85-d9977a5d9fa8/volumes" Jan 30 12:57:12 crc kubenswrapper[4703]: I0130 12:57:12.823523 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:57:12 crc kubenswrapper[4703]: I0130 12:57:12.824184 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:57:12 crc kubenswrapper[4703]: I0130 12:57:12.824259 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 12:57:12 crc kubenswrapper[4703]: I0130 12:57:12.825440 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6b57ab84336d4fe8c2455961ec7c9b8c7fd3b27d7b48885d7b70a5e429c4b7d2"} pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 12:57:12 crc kubenswrapper[4703]: I0130 12:57:12.825508 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" containerID="cri-o://6b57ab84336d4fe8c2455961ec7c9b8c7fd3b27d7b48885d7b70a5e429c4b7d2" gracePeriod=600 Jan 30 12:57:13 crc kubenswrapper[4703]: I0130 12:57:13.376362 4703 generic.go:334] "Generic (PLEG): container finished" podID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerID="6b57ab84336d4fe8c2455961ec7c9b8c7fd3b27d7b48885d7b70a5e429c4b7d2" exitCode=0 Jan 30 12:57:13 crc kubenswrapper[4703]: I0130 12:57:13.376441 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerDied","Data":"6b57ab84336d4fe8c2455961ec7c9b8c7fd3b27d7b48885d7b70a5e429c4b7d2"} Jan 30 12:57:13 crc kubenswrapper[4703]: I0130 12:57:13.376729 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerStarted","Data":"cce881e424005d8a2cc96aeff857d9755c389a1eabfe2301906a5ed7ff41c251"} Jan 30 12:57:13 crc kubenswrapper[4703]: I0130 12:57:13.376759 4703 scope.go:117] "RemoveContainer" containerID="6f8779ea718b9d4c45afbaf021ccd9450ae8a6d8a120a307c0765dcda8c16913" Jan 30 12:57:23 crc kubenswrapper[4703]: I0130 12:57:23.086103 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:57:23 crc kubenswrapper[4703]: E0130 12:57:23.088280 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:57:36 crc kubenswrapper[4703]: I0130 12:57:36.088150 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:57:36 crc kubenswrapper[4703]: E0130 12:57:36.089263 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:57:48 crc kubenswrapper[4703]: I0130 12:57:48.086701 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:57:48 crc kubenswrapper[4703]: E0130 12:57:48.087593 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:58:03 crc kubenswrapper[4703]: I0130 12:58:03.088634 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:58:03 crc kubenswrapper[4703]: E0130 12:58:03.089775 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:58:18 crc kubenswrapper[4703]: I0130 12:58:18.086385 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:58:18 crc kubenswrapper[4703]: E0130 12:58:18.087264 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:58:33 crc kubenswrapper[4703]: I0130 12:58:33.087756 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:58:33 crc kubenswrapper[4703]: E0130 12:58:33.088956 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:58:44 crc kubenswrapper[4703]: I0130 12:58:44.087089 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:58:44 crc kubenswrapper[4703]: E0130 12:58:44.088039 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:58:57 crc kubenswrapper[4703]: I0130 12:58:57.086810 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:58:57 crc kubenswrapper[4703]: E0130 12:58:57.087789 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:59:09 crc kubenswrapper[4703]: I0130 12:59:09.087434 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:59:09 crc kubenswrapper[4703]: E0130 12:59:09.088367 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:59:20 crc kubenswrapper[4703]: I0130 12:59:20.086032 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:59:20 crc kubenswrapper[4703]: E0130 12:59:20.086829 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:59:33 crc kubenswrapper[4703]: I0130 12:59:33.089575 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:59:33 crc kubenswrapper[4703]: E0130 12:59:33.090444 4703 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:59:42 crc kubenswrapper[4703]: I0130 12:59:42.823308 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 12:59:42 crc kubenswrapper[4703]: I0130 12:59:42.824107 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 12:59:48 crc kubenswrapper[4703]: I0130 12:59:48.087531 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:59:48 crc kubenswrapper[4703]: E0130 12:59:48.088652 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 12:59:59 crc kubenswrapper[4703]: I0130 12:59:59.087846 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 12:59:59 crc kubenswrapper[4703]: E0130 12:59:59.091392 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.188217 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz"] Jan 30 13:00:00 crc kubenswrapper[4703]: E0130 13:00:00.189759 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5eff89da-8248-424c-8639-dde265a5ce6a" containerName="extract-content" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.189783 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="5eff89da-8248-424c-8639-dde265a5ce6a" containerName="extract-content" Jan 30 13:00:00 crc kubenswrapper[4703]: E0130 13:00:00.189800 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5eff89da-8248-424c-8639-dde265a5ce6a" containerName="extract-utilities" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.189809 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="5eff89da-8248-424c-8639-dde265a5ce6a" containerName="extract-utilities" Jan 30 13:00:00 crc kubenswrapper[4703]: E0130 13:00:00.189824 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c916403-f0bc-47c8-bd85-d9977a5d9fa8" containerName="gather" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.189835 4703 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="7c916403-f0bc-47c8-bd85-d9977a5d9fa8" containerName="gather" Jan 30 13:00:00 crc kubenswrapper[4703]: E0130 13:00:00.189851 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c916403-f0bc-47c8-bd85-d9977a5d9fa8" containerName="copy" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.189857 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c916403-f0bc-47c8-bd85-d9977a5d9fa8" containerName="copy" Jan 30 13:00:00 crc kubenswrapper[4703]: E0130 13:00:00.189886 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5eff89da-8248-424c-8639-dde265a5ce6a" containerName="registry-server" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.189893 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="5eff89da-8248-424c-8639-dde265a5ce6a" containerName="registry-server" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.190169 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c916403-f0bc-47c8-bd85-d9977a5d9fa8" containerName="gather" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.190195 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="5eff89da-8248-424c-8639-dde265a5ce6a" containerName="registry-server" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.190218 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c916403-f0bc-47c8-bd85-d9977a5d9fa8" containerName="copy" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.191433 4703 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.194873 4703 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.195253 4703 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.208475 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz"] Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.390522 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/50b425ab-f86e-480d-ae85-1ae1b7cbc8d6-config-volume\") pod \"collect-profiles-29496300-t94pz\" (UID: \"50b425ab-f86e-480d-ae85-1ae1b7cbc8d6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.390579 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ds48v\" (UniqueName: \"kubernetes.io/projected/50b425ab-f86e-480d-ae85-1ae1b7cbc8d6-kube-api-access-ds48v\") pod \"collect-profiles-29496300-t94pz\" (UID: \"50b425ab-f86e-480d-ae85-1ae1b7cbc8d6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.390647 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/50b425ab-f86e-480d-ae85-1ae1b7cbc8d6-secret-volume\") pod \"collect-profiles-29496300-t94pz\" (UID: \"50b425ab-f86e-480d-ae85-1ae1b7cbc8d6\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.495094 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/50b425ab-f86e-480d-ae85-1ae1b7cbc8d6-secret-volume\") pod \"collect-profiles-29496300-t94pz\" (UID: \"50b425ab-f86e-480d-ae85-1ae1b7cbc8d6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.495378 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/50b425ab-f86e-480d-ae85-1ae1b7cbc8d6-config-volume\") pod \"collect-profiles-29496300-t94pz\" (UID: \"50b425ab-f86e-480d-ae85-1ae1b7cbc8d6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.495421 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ds48v\" (UniqueName: \"kubernetes.io/projected/50b425ab-f86e-480d-ae85-1ae1b7cbc8d6-kube-api-access-ds48v\") pod \"collect-profiles-29496300-t94pz\" (UID: \"50b425ab-f86e-480d-ae85-1ae1b7cbc8d6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.497644 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/50b425ab-f86e-480d-ae85-1ae1b7cbc8d6-config-volume\") pod \"collect-profiles-29496300-t94pz\" (UID: \"50b425ab-f86e-480d-ae85-1ae1b7cbc8d6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.506165 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/50b425ab-f86e-480d-ae85-1ae1b7cbc8d6-secret-volume\") pod \"collect-profiles-29496300-t94pz\" (UID: \"50b425ab-f86e-480d-ae85-1ae1b7cbc8d6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.513385 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ds48v\" (UniqueName: \"kubernetes.io/projected/50b425ab-f86e-480d-ae85-1ae1b7cbc8d6-kube-api-access-ds48v\") pod \"collect-profiles-29496300-t94pz\" (UID: \"50b425ab-f86e-480d-ae85-1ae1b7cbc8d6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz" Jan 30 13:00:00 crc kubenswrapper[4703]: I0130 13:00:00.521010 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz" Jan 30 13:00:01 crc kubenswrapper[4703]: I0130 13:00:01.294683 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz"] Jan 30 13:00:01 crc kubenswrapper[4703]: I0130 13:00:01.406744 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz" event={"ID":"50b425ab-f86e-480d-ae85-1ae1b7cbc8d6","Type":"ContainerStarted","Data":"63b9192f1fa92b3c3c512bc8f4cd32d50c42a78e2d214aa3d0e423b4175243e0"} Jan 30 13:00:02 crc kubenswrapper[4703]: I0130 13:00:02.418999 4703 generic.go:334] "Generic (PLEG): container finished" podID="50b425ab-f86e-480d-ae85-1ae1b7cbc8d6" containerID="043474a3db861ae428c55ac3f1d78ccbbaa32dabd55900e26c46f16176138fb6" exitCode=0 Jan 30 13:00:02 crc kubenswrapper[4703]: I0130 13:00:02.419086 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz" event={"ID":"50b425ab-f86e-480d-ae85-1ae1b7cbc8d6","Type":"ContainerDied","Data":"043474a3db861ae428c55ac3f1d78ccbbaa32dabd55900e26c46f16176138fb6"} Jan 30 13:00:03 crc kubenswrapper[4703]: I0130 13:00:03.828606 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz" Jan 30 13:00:03 crc kubenswrapper[4703]: I0130 13:00:03.915771 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/50b425ab-f86e-480d-ae85-1ae1b7cbc8d6-secret-volume\") pod \"50b425ab-f86e-480d-ae85-1ae1b7cbc8d6\" (UID: \"50b425ab-f86e-480d-ae85-1ae1b7cbc8d6\") " Jan 30 13:00:03 crc kubenswrapper[4703]: I0130 13:00:03.916044 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/50b425ab-f86e-480d-ae85-1ae1b7cbc8d6-config-volume\") pod \"50b425ab-f86e-480d-ae85-1ae1b7cbc8d6\" (UID: \"50b425ab-f86e-480d-ae85-1ae1b7cbc8d6\") " Jan 30 13:00:03 crc kubenswrapper[4703]: I0130 13:00:03.916268 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ds48v\" (UniqueName: \"kubernetes.io/projected/50b425ab-f86e-480d-ae85-1ae1b7cbc8d6-kube-api-access-ds48v\") pod \"50b425ab-f86e-480d-ae85-1ae1b7cbc8d6\" (UID: \"50b425ab-f86e-480d-ae85-1ae1b7cbc8d6\") " Jan 30 13:00:03 crc kubenswrapper[4703]: I0130 13:00:03.917474 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/50b425ab-f86e-480d-ae85-1ae1b7cbc8d6-config-volume" (OuterVolumeSpecName: "config-volume") pod "50b425ab-f86e-480d-ae85-1ae1b7cbc8d6" (UID: "50b425ab-f86e-480d-ae85-1ae1b7cbc8d6"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 30 13:00:03 crc kubenswrapper[4703]: I0130 13:00:03.918139 4703 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/50b425ab-f86e-480d-ae85-1ae1b7cbc8d6-config-volume\") on node \"crc\" DevicePath \"\"" Jan 30 13:00:03 crc kubenswrapper[4703]: I0130 13:00:03.925373 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50b425ab-f86e-480d-ae85-1ae1b7cbc8d6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "50b425ab-f86e-480d-ae85-1ae1b7cbc8d6" (UID: "50b425ab-f86e-480d-ae85-1ae1b7cbc8d6"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 13:00:03 crc kubenswrapper[4703]: I0130 13:00:03.926040 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50b425ab-f86e-480d-ae85-1ae1b7cbc8d6-kube-api-access-ds48v" (OuterVolumeSpecName: "kube-api-access-ds48v") pod "50b425ab-f86e-480d-ae85-1ae1b7cbc8d6" (UID: "50b425ab-f86e-480d-ae85-1ae1b7cbc8d6"). InnerVolumeSpecName "kube-api-access-ds48v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 13:00:04 crc kubenswrapper[4703]: I0130 13:00:04.021194 4703 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/50b425ab-f86e-480d-ae85-1ae1b7cbc8d6-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 30 13:00:04 crc kubenswrapper[4703]: I0130 13:00:04.021264 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ds48v\" (UniqueName: \"kubernetes.io/projected/50b425ab-f86e-480d-ae85-1ae1b7cbc8d6-kube-api-access-ds48v\") on node \"crc\" DevicePath \"\"" Jan 30 13:00:04 crc kubenswrapper[4703]: I0130 13:00:04.446095 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz" event={"ID":"50b425ab-f86e-480d-ae85-1ae1b7cbc8d6","Type":"ContainerDied","Data":"63b9192f1fa92b3c3c512bc8f4cd32d50c42a78e2d214aa3d0e423b4175243e0"} Jan 30 13:00:04 crc kubenswrapper[4703]: I0130 13:00:04.446170 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="63b9192f1fa92b3c3c512bc8f4cd32d50c42a78e2d214aa3d0e423b4175243e0" Jan 30 13:00:04 crc kubenswrapper[4703]: I0130 13:00:04.446295 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29496300-t94pz" Jan 30 13:00:04 crc kubenswrapper[4703]: I0130 13:00:04.928394 4703 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42"] Jan 30 13:00:04 crc kubenswrapper[4703]: I0130 13:00:04.940762 4703 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29496255-cdn42"] Jan 30 13:00:05 crc kubenswrapper[4703]: I0130 13:00:05.103096 4703 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b063a5d7-e03b-46db-85b0-75adab6bca8f" path="/var/lib/kubelet/pods/b063a5d7-e03b-46db-85b0-75adab6bca8f/volumes" Jan 30 13:00:13 crc kubenswrapper[4703]: I0130 13:00:13.038645 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 13:00:13 crc kubenswrapper[4703]: I0130 13:00:13.039406 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 13:00:14 crc kubenswrapper[4703]: I0130 13:00:14.087881 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 13:00:14 crc kubenswrapper[4703]: E0130 13:00:14.088861 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 13:00:28 crc kubenswrapper[4703]: I0130 13:00:28.086967 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 13:00:28 crc kubenswrapper[4703]: E0130 13:00:28.088532 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 13:00:40 crc kubenswrapper[4703]: I0130 13:00:40.087996 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 13:00:40 crc kubenswrapper[4703]: E0130 13:00:40.088865 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 13:00:42 crc kubenswrapper[4703]: I0130 13:00:42.823671 4703 patch_prober.go:28] interesting pod/machine-config-daemon-cx2rm container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure 
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 30 13:00:42 crc kubenswrapper[4703]: I0130 13:00:42.824661 4703 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 30 13:00:42 crc kubenswrapper[4703]: I0130 13:00:42.824738 4703 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" Jan 30 13:00:42 crc kubenswrapper[4703]: I0130 13:00:42.825837 4703 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cce881e424005d8a2cc96aeff857d9755c389a1eabfe2301906a5ed7ff41c251"} pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 30 13:00:42 crc kubenswrapper[4703]: I0130 13:00:42.825908 4703 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerName="machine-config-daemon" containerID="cri-o://cce881e424005d8a2cc96aeff857d9755c389a1eabfe2301906a5ed7ff41c251" gracePeriod=600 Jan 30 13:00:42 crc kubenswrapper[4703]: E0130 13:00:42.952769 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 13:00:43 crc kubenswrapper[4703]: I0130 13:00:43.953572 4703 generic.go:334] "Generic (PLEG): container finished" podID="ffea6197-b1fb-427b-adc5-bcc1c6108235" containerID="cce881e424005d8a2cc96aeff857d9755c389a1eabfe2301906a5ed7ff41c251" exitCode=0 Jan 30 13:00:43 crc kubenswrapper[4703]: I0130 13:00:43.953666 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" event={"ID":"ffea6197-b1fb-427b-adc5-bcc1c6108235","Type":"ContainerDied","Data":"cce881e424005d8a2cc96aeff857d9755c389a1eabfe2301906a5ed7ff41c251"} Jan 30 13:00:43 crc kubenswrapper[4703]: I0130 13:00:43.954178 4703 scope.go:117] "RemoveContainer" containerID="6b57ab84336d4fe8c2455961ec7c9b8c7fd3b27d7b48885d7b70a5e429c4b7d2" Jan 30 13:00:43 crc kubenswrapper[4703]: I0130 13:00:43.955218 4703 scope.go:117] "RemoveContainer" containerID="cce881e424005d8a2cc96aeff857d9755c389a1eabfe2301906a5ed7ff41c251" Jan 30 13:00:43 crc kubenswrapper[4703]: E0130 13:00:43.955796 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 13:00:52 crc kubenswrapper[4703]: I0130 13:00:52.087584 4703 
scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 13:00:52 crc kubenswrapper[4703]: E0130 13:00:52.088839 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 13:00:55 crc kubenswrapper[4703]: I0130 13:00:55.302890 4703 scope.go:117] "RemoveContainer" containerID="57c983168859c3664156a0a6cc0727dedbdc2a5db854d7e49460e92cda694768" Jan 30 13:00:58 crc kubenswrapper[4703]: I0130 13:00:58.086753 4703 scope.go:117] "RemoveContainer" containerID="cce881e424005d8a2cc96aeff857d9755c389a1eabfe2301906a5ed7ff41c251" Jan 30 13:00:58 crc kubenswrapper[4703]: E0130 13:00:58.087821 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.227973 4703 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29496301-6xwxx"] Jan 30 13:01:00 crc kubenswrapper[4703]: E0130 13:01:00.229156 4703 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50b425ab-f86e-480d-ae85-1ae1b7cbc8d6" containerName="collect-profiles" Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.229178 4703 state_mem.go:107] "Deleted CPUSet assignment" podUID="50b425ab-f86e-480d-ae85-1ae1b7cbc8d6" containerName="collect-profiles" Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.229490 4703 memory_manager.go:354] "RemoveStaleState removing state" podUID="50b425ab-f86e-480d-ae85-1ae1b7cbc8d6" containerName="collect-profiles" Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.239049 4703 util.go:30] "No sandbox for pod can be found. 
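
Unlike the refused liveness probe earlier, this readiness probe fails with "context deadline exceeded": the request is not refused, the endpoint simply did not answer within the probe's timeout, so the pod is marked unready rather than restarted. A hypothetical reproduction from somewhere on the cluster network, with the URL taken verbatim from the probe output (the 1-second cap stands in for the probe timeout, whose real value is not in this log):

    # expect curl to time out rather than be refused
    curl -v --max-time 1 http://10.217.0.44:8081/readyz
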
Need to start a new one" pod="openstack/keystone-cron-29496301-6xwxx" Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.251178 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29496301-6xwxx"] Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.349906 4703 patch_prober.go:28] interesting pod/perses-operator-5bf474d74f-n9zw2 container/perses-operator namespace/openshift-operators: Readiness probe status=failure output="Get \"http://10.217.0.44:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.350046 4703 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operators/perses-operator-5bf474d74f-n9zw2" podUID="74ad7d1c-6e7a-4f7e-a441-e301ec3593ab" containerName="perses-operator" probeResult="failure" output="Get \"http://10.217.0.44:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.435487 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c40bd20d-c530-449e-86d6-0e6f3dc481df-fernet-keys\") pod \"keystone-cron-29496301-6xwxx\" (UID: \"c40bd20d-c530-449e-86d6-0e6f3dc481df\") " pod="openstack/keystone-cron-29496301-6xwxx" Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.435645 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c40bd20d-c530-449e-86d6-0e6f3dc481df-combined-ca-bundle\") pod \"keystone-cron-29496301-6xwxx\" (UID: \"c40bd20d-c530-449e-86d6-0e6f3dc481df\") " pod="openstack/keystone-cron-29496301-6xwxx" Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.435739 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c40bd20d-c530-449e-86d6-0e6f3dc481df-config-data\") pod \"keystone-cron-29496301-6xwxx\" (UID: \"c40bd20d-c530-449e-86d6-0e6f3dc481df\") " pod="openstack/keystone-cron-29496301-6xwxx" Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.435776 4703 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pkhn\" (UniqueName: \"kubernetes.io/projected/c40bd20d-c530-449e-86d6-0e6f3dc481df-kube-api-access-2pkhn\") pod \"keystone-cron-29496301-6xwxx\" (UID: \"c40bd20d-c530-449e-86d6-0e6f3dc481df\") " pod="openstack/keystone-cron-29496301-6xwxx" Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.538559 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c40bd20d-c530-449e-86d6-0e6f3dc481df-fernet-keys\") pod \"keystone-cron-29496301-6xwxx\" (UID: \"c40bd20d-c530-449e-86d6-0e6f3dc481df\") " pod="openstack/keystone-cron-29496301-6xwxx" Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.538708 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c40bd20d-c530-449e-86d6-0e6f3dc481df-combined-ca-bundle\") pod \"keystone-cron-29496301-6xwxx\" (UID: \"c40bd20d-c530-449e-86d6-0e6f3dc481df\") " pod="openstack/keystone-cron-29496301-6xwxx" Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.538783 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/c40bd20d-c530-449e-86d6-0e6f3dc481df-config-data\") pod \"keystone-cron-29496301-6xwxx\" (UID: \"c40bd20d-c530-449e-86d6-0e6f3dc481df\") " pod="openstack/keystone-cron-29496301-6xwxx" Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.538813 4703 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pkhn\" (UniqueName: \"kubernetes.io/projected/c40bd20d-c530-449e-86d6-0e6f3dc481df-kube-api-access-2pkhn\") pod \"keystone-cron-29496301-6xwxx\" (UID: \"c40bd20d-c530-449e-86d6-0e6f3dc481df\") " pod="openstack/keystone-cron-29496301-6xwxx" Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.549511 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c40bd20d-c530-449e-86d6-0e6f3dc481df-config-data\") pod \"keystone-cron-29496301-6xwxx\" (UID: \"c40bd20d-c530-449e-86d6-0e6f3dc481df\") " pod="openstack/keystone-cron-29496301-6xwxx" Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.557661 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c40bd20d-c530-449e-86d6-0e6f3dc481df-fernet-keys\") pod \"keystone-cron-29496301-6xwxx\" (UID: \"c40bd20d-c530-449e-86d6-0e6f3dc481df\") " pod="openstack/keystone-cron-29496301-6xwxx" Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.559528 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c40bd20d-c530-449e-86d6-0e6f3dc481df-combined-ca-bundle\") pod \"keystone-cron-29496301-6xwxx\" (UID: \"c40bd20d-c530-449e-86d6-0e6f3dc481df\") " pod="openstack/keystone-cron-29496301-6xwxx" Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.559720 4703 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pkhn\" (UniqueName: \"kubernetes.io/projected/c40bd20d-c530-449e-86d6-0e6f3dc481df-kube-api-access-2pkhn\") pod \"keystone-cron-29496301-6xwxx\" (UID: \"c40bd20d-c530-449e-86d6-0e6f3dc481df\") " pod="openstack/keystone-cron-29496301-6xwxx" Jan 30 13:01:00 crc kubenswrapper[4703]: I0130 13:01:00.563853 4703 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29496301-6xwxx" Jan 30 13:01:01 crc kubenswrapper[4703]: I0130 13:01:01.102878 4703 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29496301-6xwxx"] Jan 30 13:01:01 crc kubenswrapper[4703]: I0130 13:01:01.627468 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29496301-6xwxx" event={"ID":"c40bd20d-c530-449e-86d6-0e6f3dc481df","Type":"ContainerStarted","Data":"7dd6c9dad15b03038cfc51b90d4a9d2057498ce11f9850eb51f0a5c4feee1e25"} Jan 30 13:01:01 crc kubenswrapper[4703]: I0130 13:01:01.628068 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29496301-6xwxx" event={"ID":"c40bd20d-c530-449e-86d6-0e6f3dc481df","Type":"ContainerStarted","Data":"6cd62a078b1493c8436047bfeafdc136154a821b58ab0ad4253492a84b641106"} Jan 30 13:01:01 crc kubenswrapper[4703]: I0130 13:01:01.653637 4703 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29496301-6xwxx" podStartSLOduration=1.653597524 podStartE2EDuration="1.653597524s" podCreationTimestamp="2026-01-30 13:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-30 13:01:01.651780936 +0000 UTC m=+3897.429602600" watchObservedRunningTime="2026-01-30 13:01:01.653597524 +0000 UTC m=+3897.431419178" Jan 30 13:01:04 crc kubenswrapper[4703]: I0130 13:01:04.679857 4703 generic.go:334] "Generic (PLEG): container finished" podID="c40bd20d-c530-449e-86d6-0e6f3dc481df" containerID="7dd6c9dad15b03038cfc51b90d4a9d2057498ce11f9850eb51f0a5c4feee1e25" exitCode=0 Jan 30 13:01:04 crc kubenswrapper[4703]: I0130 13:01:04.679973 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29496301-6xwxx" event={"ID":"c40bd20d-c530-449e-86d6-0e6f3dc481df","Type":"ContainerDied","Data":"7dd6c9dad15b03038cfc51b90d4a9d2057498ce11f9850eb51f0a5c4feee1e25"} Jan 30 13:01:05 crc kubenswrapper[4703]: I0130 13:01:05.098894 4703 scope.go:117] "RemoveContainer" containerID="9188639e22aebd99d4e3204f35c2ebd8a3ad3cdf8376051d6f232bb0e373c862" Jan 30 13:01:05 crc kubenswrapper[4703]: E0130 13:01:05.099464 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nova-scheduler-scheduler\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=nova-scheduler-scheduler pod=nova-scheduler-0_openstack(2fc19a6b-3cde-4bb5-9499-f5be846289da)\"" pod="openstack/nova-scheduler-0" podUID="2fc19a6b-3cde-4bb5-9499-f5be846289da" Jan 30 13:01:06 crc kubenswrapper[4703]: I0130 13:01:06.102607 4703 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29496301-6xwxx" Jan 30 13:01:06 crc kubenswrapper[4703]: I0130 13:01:06.222081 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c40bd20d-c530-449e-86d6-0e6f3dc481df-config-data\") pod \"c40bd20d-c530-449e-86d6-0e6f3dc481df\" (UID: \"c40bd20d-c530-449e-86d6-0e6f3dc481df\") " Jan 30 13:01:06 crc kubenswrapper[4703]: I0130 13:01:06.222404 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c40bd20d-c530-449e-86d6-0e6f3dc481df-combined-ca-bundle\") pod \"c40bd20d-c530-449e-86d6-0e6f3dc481df\" (UID: \"c40bd20d-c530-449e-86d6-0e6f3dc481df\") " Jan 30 13:01:06 crc kubenswrapper[4703]: I0130 13:01:06.222573 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c40bd20d-c530-449e-86d6-0e6f3dc481df-fernet-keys\") pod \"c40bd20d-c530-449e-86d6-0e6f3dc481df\" (UID: \"c40bd20d-c530-449e-86d6-0e6f3dc481df\") " Jan 30 13:01:06 crc kubenswrapper[4703]: I0130 13:01:06.222687 4703 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2pkhn\" (UniqueName: \"kubernetes.io/projected/c40bd20d-c530-449e-86d6-0e6f3dc481df-kube-api-access-2pkhn\") pod \"c40bd20d-c530-449e-86d6-0e6f3dc481df\" (UID: \"c40bd20d-c530-449e-86d6-0e6f3dc481df\") " Jan 30 13:01:06 crc kubenswrapper[4703]: I0130 13:01:06.230378 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c40bd20d-c530-449e-86d6-0e6f3dc481df-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "c40bd20d-c530-449e-86d6-0e6f3dc481df" (UID: "c40bd20d-c530-449e-86d6-0e6f3dc481df"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 13:01:06 crc kubenswrapper[4703]: I0130 13:01:06.233839 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c40bd20d-c530-449e-86d6-0e6f3dc481df-kube-api-access-2pkhn" (OuterVolumeSpecName: "kube-api-access-2pkhn") pod "c40bd20d-c530-449e-86d6-0e6f3dc481df" (UID: "c40bd20d-c530-449e-86d6-0e6f3dc481df"). InnerVolumeSpecName "kube-api-access-2pkhn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 30 13:01:06 crc kubenswrapper[4703]: I0130 13:01:06.260912 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c40bd20d-c530-449e-86d6-0e6f3dc481df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c40bd20d-c530-449e-86d6-0e6f3dc481df" (UID: "c40bd20d-c530-449e-86d6-0e6f3dc481df"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 13:01:06 crc kubenswrapper[4703]: I0130 13:01:06.296160 4703 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c40bd20d-c530-449e-86d6-0e6f3dc481df-config-data" (OuterVolumeSpecName: "config-data") pod "c40bd20d-c530-449e-86d6-0e6f3dc481df" (UID: "c40bd20d-c530-449e-86d6-0e6f3dc481df"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 30 13:01:06 crc kubenswrapper[4703]: I0130 13:01:06.325763 4703 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2pkhn\" (UniqueName: \"kubernetes.io/projected/c40bd20d-c530-449e-86d6-0e6f3dc481df-kube-api-access-2pkhn\") on node \"crc\" DevicePath \"\"" Jan 30 13:01:06 crc kubenswrapper[4703]: I0130 13:01:06.325802 4703 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c40bd20d-c530-449e-86d6-0e6f3dc481df-config-data\") on node \"crc\" DevicePath \"\"" Jan 30 13:01:06 crc kubenswrapper[4703]: I0130 13:01:06.325814 4703 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c40bd20d-c530-449e-86d6-0e6f3dc481df-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 30 13:01:06 crc kubenswrapper[4703]: I0130 13:01:06.325822 4703 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c40bd20d-c530-449e-86d6-0e6f3dc481df-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 30 13:01:06 crc kubenswrapper[4703]: I0130 13:01:06.706174 4703 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29496301-6xwxx" event={"ID":"c40bd20d-c530-449e-86d6-0e6f3dc481df","Type":"ContainerDied","Data":"6cd62a078b1493c8436047bfeafdc136154a821b58ab0ad4253492a84b641106"} Jan 30 13:01:06 crc kubenswrapper[4703]: I0130 13:01:06.706243 4703 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29496301-6xwxx" Jan 30 13:01:06 crc kubenswrapper[4703]: I0130 13:01:06.706255 4703 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6cd62a078b1493c8436047bfeafdc136154a821b58ab0ad4253492a84b641106" Jan 30 13:01:10 crc kubenswrapper[4703]: I0130 13:01:10.087605 4703 scope.go:117] "RemoveContainer" containerID="cce881e424005d8a2cc96aeff857d9755c389a1eabfe2301906a5ed7ff41c251" Jan 30 13:01:10 crc kubenswrapper[4703]: E0130 13:01:10.088188 4703 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-cx2rm_openshift-machine-config-operator(ffea6197-b1fb-427b-adc5-bcc1c6108235)\"" pod="openshift-machine-config-operator/machine-config-daemon-cx2rm" podUID="ffea6197-b1fb-427b-adc5-bcc1c6108235" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515137125646024457 0ustar coreroot  Om77'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015137125647017375 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015137115527016514 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015137115527015464 5ustar corecore